This is an R Markdown document. Markdown is a simple formatting syntax for authoring HTML, PDF, and MS Word documents. For more details on using R Markdown see http://rmarkdown.rstudio.com.
When you click the Knit button a document will be generated that includes both content as well as the output of any embedded R code chunks within the document. You can embed an R code chunk like this:
# Load required packages
# install.packages("dplyr")
# install.packages("Hmisc")
# install.packages("Matrix")
# install.packages("ggplot2")
# install.packages("emmeans")
# install.packages("lsmeans")
# install.packages("effects")
# install.packages("nlme")
# install.packages("lme4")
# install.packages("lmerTest")
# install.packages("sjPlot")
# install.packages("stats")
# install.packages("lmerTest")
library(dplyr, quietly = TRUE, warn.conflicts = FALSE)
library(Hmisc, quietly = TRUE, warn.conflicts = FALSE)
library(ggplot2, quietly = TRUE, warn.conflicts = FALSE)
library(emmeans, quietly = TRUE, warn.conflicts = FALSE)
library(lsmeans, quietly = TRUE, warn.conflicts = FALSE)
## The 'lsmeans' package is now basically a front end for 'emmeans'.
## Users are encouraged to switch the rest of the way.
## See help('transition') for more information, including how to
## convert old 'lsmeans' objects and scripts to work with 'emmeans'.
library(effects, quietly = TRUE, warn.conflicts = FALSE)
## lattice theme set by effectsTheme()
## See ?effectsTheme for details.
library(Matrix, quietly = TRUE, warn.conflicts = FALSE)
# library(nlme, quietly = TRUE, warn.conflicts = FALSE)
library(lme4, quietly = TRUE, warn.conflicts = FALSE)
library(lmerTest, quietly = TRUE, warn.conflicts = FALSE)
library(sjPlot, quietly = TRUE, warn.conflicts = FALSE)
## Learn more about sjPlot with 'browseVignettes("sjPlot")'.
library(stats, quietly = TRUE, warn.conflicts = FALSE)
ZOO_good <- read.csv(file = '/Users/aysuerdemir/Desktop/R workspace/ERP_Zoo/CrossSectional/Mix/ZOO_good.csv')
# Remove the added X variable after reading it back into R:
ZOO_good <- ZOO_good %>%
dplyr::select(-X)
# Display the first 6 rows of te final dataset:
head(ZOO_good)
## Subject Condition Emotion Trial.Type Laterality Electrode MeanAmp_P2
## 1 AE050318 Go Neg NegGo Midline NA -8.6567894
## 2 AE050318 Go Neut NeutGo Left NA -8.0614951
## 3 AE050318 Go Neg NegGo Left NA -8.2741357
## 4 AE050318 NoGo Neg NegNoGo Right NA 3.0738593
## 5 AE050318 Go Neg NegGo Right NA -10.9978696
## 6 AE050318 NoGo Neg NegNoGo Midline NA 0.1368089
## MeanAmp_N2 MeanAmp_P3 Latency_P2 PeakAmp_P2 Latency_N2 PeakAmp_N2 Latency_P3
## 1 -17.6405009 4.369997 212 -6.242593 352 -23.058663 612
## 2 -12.3241910 6.126728 212 -6.498847 388 -14.786896 516
## 3 -12.4529149 7.006061 200 -6.982536 368 -17.043367 512
## 4 -0.6563204 0.449258 232 5.512485 400 -3.018455 508
## 5 -17.0612959 -1.012820 208 -8.224361 352 -20.626624 576
## 6 -2.7636031 3.345500 252 3.300071 452 -5.257907 600
## PeakAmp_P3 MeanAmp_N2P2 PeakAmp_N2P2 calculator_age_cve calculator_gender_cve
## 1 6.037603 -8.983711 -16.816070 38.1 0
## 2 8.529244 -4.262696 -8.288049 38.1 0
## 3 9.862223 -4.178779 -10.060831 38.1 0
## 4 3.116096 -3.730180 -8.530940 38.1 0
## 5 0.844127 -6.063426 -12.402263 38.1 0
## 6 6.301873 -2.900412 -8.557978 38.1 0
## race ethnicity calculator_talkergroup_parent tso_calculated
## 1 2 0 1 1.9
## 2 2 0 1 1.9
## 3 2 0 1 1.9
## 4 2 0 1 1.9
## 5 2 0 1 1.9
## 6 2 0 1 1.9
## disfluency_sldper100words ssi_total disfluency_sldper100words_final
## 1 12 23 12
## 2 12 23 12
## 3 12 23 12
## 4 12 23 12
## 5 12 23 12
## 6 12 23 12
## talkergroup_final gfta_standard ppvt_standard evt_standard teld_rec_standard
## 1 1 121 126 123 146
## 2 1 121 126 123 146
## 3 1 121 126 123 146
## 4 1 121 126 123 146
## 5 1 121 126 123 146
## 6 1 121 126 123 146
## teld_exp_standard teld_spokenlang_standard tocs_1_total tocs_2_total
## 1 135 149 22 6
## 2 135 149 22 6
## 3 135 149 22 6
## 4 135 149 22 6
## 5 135 149 22 6
## 6 135 149 22 6
## tcs_total eprime_condorder_zootask cve_comments comments_tasks handedness_zoo
## 1 25 1 NA
## 2 25 1 NA
## 3 25 1 NA
## 4 25 1 NA
## 5 25 1 NA
## 6 25 1 NA
## accuracy hit_falsealarm premature_responses RT_proper RT_premature
## 1 75.83333 0.7583333 9.9009901 774.044 87.10
## 2 90.00000 0.9000000 0.9174312 802.213 151.00
## 3 75.83333 0.7583333 9.9009901 774.044 87.10
## 4 57.50000 0.3250000 23.5294118 566.000 111.75
## 5 75.83333 0.7583333 9.9009901 774.044 87.10
## 6 57.50000 0.3250000 23.5294118 566.000 111.75
## sensitivity part_id_status.x redcap_event_name.x inhibit shift emotionalCntrl
## 1 1.154714 AE050318 t1_arm_1 36 15 13
## 2 1.956041 AE050318 t1_arm_1 36 15 13
## 3 1.154714 AE050318 t1_arm_1 36 15 13
## 4 1.154714 AE050318 t1_arm_1 36 15 13
## 5 1.154714 AE050318 t1_arm_1 36 15 13
## 6 1.154714 AE050318 t1_arm_1 36 15 13
## workingMemory planOrganize BehavioralRegulationIndex_BRI
## 1 37 20 64
## 2 37 20 64
## 3 37 20 64
## 4 37 20 64
## 5 37 20 64
## 6 37 20 64
## MetacognitionIndex_MI GlobalExecutiveComposite_GEC
## 1 57 121
## 2 57 121
## 3 57 121
## 4 57 121
## 5 57 121
## 6 57 121
## InhibitorySelfControlIndex_ISCI FlexibilityIndex_FI initiate orgofMaterials
## 1 49 28 NA NA
## 2 49 28 NA NA
## 3 49 28 NA NA
## 4 49 28 NA NA
## 5 49 28 NA NA
## 6 49 28 NA NA
## monitor part_id_status.y redcap_event_name.y activity_level anger_frustration
## 1 NA AE050318 t1_arm_1 5.307692 3.923077
## 2 NA AE050318 t1_arm_1 5.307692 3.923077
## 3 NA AE050318 t1_arm_1 5.307692 3.923077
## 4 NA AE050318 t1_arm_1 5.307692 3.923077
## 5 NA AE050318 t1_arm_1 5.307692 3.923077
## 6 NA AE050318 t1_arm_1 5.307692 3.923077
## approach attention_focus attention_shift discomfort soothability fear
## 1 3.846154 2.666667 5.2 3.416667 5.307692 3.666667
## 2 3.846154 2.666667 5.2 3.416667 5.307692 3.666667
## 3 3.846154 2.666667 5.2 3.416667 5.307692 3.666667
## 4 3.846154 2.666667 5.2 3.416667 5.307692 3.666667
## 5 3.846154 2.666667 5.2 3.416667 5.307692 3.666667
## 6 3.846154 2.666667 5.2 3.416667 5.307692 3.666667
## hi_intense_pleas impulsivity inhibit_control lo_instense_pleas
## 1 5.307692 4.307692 4 5.166667
## 2 5.307692 4.307692 4 5.166667
## 3 5.307692 4.307692 4 5.166667
## 4 5.307692 4.307692 4 5.166667
## 5 5.307692 4.307692 4 5.166667
## 6 5.307692 4.307692 4 5.166667
## percept_sensitive sadness shyness smiling_laughter shy_r sth_r
## 1 4.666667 2.916667 3.692308 6.307692 4.307692 2.692308
## 2 4.666667 2.916667 3.692308 6.307692 4.307692 2.692308
## 3 4.666667 2.916667 3.692308 6.307692 4.307692 2.692308
## 4 4.666667 2.916667 3.692308 6.307692 4.307692 2.692308
## 5 4.666667 2.916667 3.692308 6.307692 4.307692 2.692308
## 6 4.666667 2.916667 3.692308 6.307692 4.307692 2.692308
## surgency effortful_con neg_affect
## 1 4.807692 4.125 3.323077
## 2 4.807692 4.125 3.323077
## 3 4.807692 4.125 3.323077
## 4 4.807692 4.125 3.323077
## 5 4.807692 4.125 3.323077
## 6 4.807692 4.125 3.323077
print(names(ZOO_good))
## [1] "Subject" "Condition"
## [3] "Emotion" "Trial.Type"
## [5] "Laterality" "Electrode"
## [7] "MeanAmp_P2" "MeanAmp_N2"
## [9] "MeanAmp_P3" "Latency_P2"
## [11] "PeakAmp_P2" "Latency_N2"
## [13] "PeakAmp_N2" "Latency_P3"
## [15] "PeakAmp_P3" "MeanAmp_N2P2"
## [17] "PeakAmp_N2P2" "calculator_age_cve"
## [19] "calculator_gender_cve" "race"
## [21] "ethnicity" "calculator_talkergroup_parent"
## [23] "tso_calculated" "disfluency_sldper100words"
## [25] "ssi_total" "disfluency_sldper100words_final"
## [27] "talkergroup_final" "gfta_standard"
## [29] "ppvt_standard" "evt_standard"
## [31] "teld_rec_standard" "teld_exp_standard"
## [33] "teld_spokenlang_standard" "tocs_1_total"
## [35] "tocs_2_total" "tcs_total"
## [37] "eprime_condorder_zootask" "cve_comments"
## [39] "comments_tasks" "handedness_zoo"
## [41] "accuracy" "hit_falsealarm"
## [43] "premature_responses" "RT_proper"
## [45] "RT_premature" "sensitivity"
## [47] "part_id_status.x" "redcap_event_name.x"
## [49] "inhibit" "shift"
## [51] "emotionalCntrl" "workingMemory"
## [53] "planOrganize" "BehavioralRegulationIndex_BRI"
## [55] "MetacognitionIndex_MI" "GlobalExecutiveComposite_GEC"
## [57] "InhibitorySelfControlIndex_ISCI" "FlexibilityIndex_FI"
## [59] "initiate" "orgofMaterials"
## [61] "monitor" "part_id_status.y"
## [63] "redcap_event_name.y" "activity_level"
## [65] "anger_frustration" "approach"
## [67] "attention_focus" "attention_shift"
## [69] "discomfort" "soothability"
## [71] "fear" "hi_intense_pleas"
## [73] "impulsivity" "inhibit_control"
## [75] "lo_instense_pleas" "percept_sensitive"
## [77] "sadness" "shyness"
## [79] "smiling_laughter" "shy_r"
## [81] "sth_r" "surgency"
## [83] "effortful_con" "neg_affect"
# DATAFRAME FOR ERP and BEHAVIORAL ANALYSES:
ZOO_FCz_Pz <- subset(ZOO_good, (Laterality == "Midline"))
rownames(ZOO_FCz_Pz) <- NULL # reset index
# DATAFRAME FOR AGE-GENDER ANALYSES:
# Create a dataset with only 1 entry for each subject:
# Pick one condition - randomly - because we are only working on age and gender for this one:
ZOO_age_gender <- subset(ZOO_FCz_Pz, (Trial.Type == "NegGo"))
rownames(ZOO_age_gender) <- NULL # reset index
# Give Age, Gender and Count Summary Statistics for each Group:
ZOO_age_gender_summary <-
ZOO_age_gender %>%
group_by(talkergroup_final) %>%
summarise(
age = mean(calculator_age_cve, na.rm = TRUE),
min_age = min(calculator_age_cve, na.rm = TRUE),
max_age = max(calculator_age_cve, na.rm = TRUE),
male_gender_count = sum(calculator_gender_cve, na.rm = TRUE), # because male = 1, female = 0
SD_age = sd(calculator_age_cve, na.rm = TRUE),
Count_kids = n_distinct(Subject)
)
print(ZOO_age_gender_summary)
## # A tibble: 2 × 7
## talkergroup_final age min_age max_age male_gender_count SD_age Count_kids
## <int> <dbl> <dbl> <dbl> <int> <dbl> <int>
## 1 0 57.5 38 81.7 23 12.6 42
## 2 1 58.4 36.6 79.4 22 13.1 37
# Histogram of Age across all participants
hist(ZOO_age_gender$calculator_age_cve,
main = "Histogram of Age",
xlab = "age in months",
ylab = "Frequency",
col = "blue", # Color of bars
border = "black", # Border color of bars
# xlim = c(min_value, max_value), # Adjust the x-axis limits if needed
breaks = 12)
# print(quantile(ZOO$calculator_age_cve, 0.50, na.rm = TRUE),)
# CREATE TWO PLOTS FOR CWS AND CWNS
# Create a new plot
par(mfrow=c(1,2)) # This sets up a 1x2 grid for side-by-side histograms
# Histogram for talkergroup_final == 1
hist(ZOO_age_gender$calculator_age_cve[ZOO_age_gender$talkergroup_final == 0],
main = "Histogram for CWNS",
xlab = "age in months",
ylab = "Frequency",
col = "blue",
border = "black",
xlim = c(30, 100),
breaks = 12)
# Histogram for talkergroup_final == 2
hist(ZOO_age_gender$calculator_age_cve[ZOO_age_gender$talkergroup_final == 1],
main = "Histogram for CWS",
xlab = "age in months",
ylab = "Frequency",
col = "red", # Use a different color for the second group
border = "black",
xlim = c(30, 100),
breaks = 12)
# Reset the plotting to a single plot
par(mfrow=c(1,1))
# MAKE SURE AGE DOES NOT DIFFER BETWEEN GROUPS USING ANOVA
model <- aov (calculator_age_cve ~ talkergroup_final, data = ZOO_age_gender)
anova(model)
## Analysis of Variance Table
##
## Response: calculator_age_cve
## Df Sum Sq Mean Sq F value Pr(>F)
## talkergroup_final 1 16 15.972 0.0969 0.7564
## Residuals 77 12688 164.786
# MAKE SURE AGE DOES NOT DIFFER BETWEEN GROUPS USING T-Test
# Subset the data into two groups based on talkergroup_final
group_CWNS <- ZOO_age_gender$calculator_age_cve[ZOO_age_gender$talkergroup_final == 0]
group_CWS <- ZOO_age_gender$calculator_age_cve[ZOO_age_gender$talkergroup_final == 1]
# Perform a two-sample t-test
t_test_result <- t.test(group_CWNS, group_CWS, alternative = "two.sided")
# Display the t-test result
print(t_test_result)
##
## Welch Two Sample t-test
##
## data: group_CWNS and group_CWS
## t = -0.31049, df = 74.829, p-value = 0.757
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
## -6.682676 4.880488
## sample estimates:
## mean of x mean of y
## 57.48810 58.38919
# Histogram of Accuracy, premature_responses, and RT_proper
summary_behavioral_hist <-
ZOO_FCz_Pz %>%
group_by(Subject, talkergroup_final, Trial.Type) %>%
summarise(Average_accuracy = mean(accuracy, na.rm = TRUE),
Average_premature_responses = mean(premature_responses, na.rm = TRUE),
Sensitivity = mean(sensitivity, na.rm = TRUE),
Average_RT = mean(RT_proper, na.rm = TRUE))
## `summarise()` has grouped output by 'Subject', 'talkergroup_final'. You can
## override using the `.groups` argument.
print(summary_behavioral_hist)
## # A tibble: 316 × 7
## # Groups: Subject, talkergroup_final [79]
## Subject talkergroup_final Trial.Type Average_accuracy Average_premature_re…¹
## <chr> <int> <chr> <dbl> <dbl>
## 1 AE050318 1 NegGo 75.8 9.90
## 2 AE050318 1 NegNoGo 57.5 23.5
## 3 AE050318 1 NeutGo 90 0.917
## 4 AE050318 1 NeutNoGo 70 16.7
## 5 AH101121 0 NegGo 99.2 0
## 6 AH101121 0 NegNoGo 87.5 0
## 7 AH101121 0 NeutGo 100 0
## 8 AH101121 0 NeutNoGo 80 0
## 9 AK022218 0 NegGo 90 0.917
## 10 AK022218 0 NegNoGo 95 0
## # ℹ 306 more rows
## # ℹ abbreviated name: ¹Average_premature_responses
## # ℹ 2 more variables: Sensitivity <dbl>, Average_RT <dbl>
# Sensitivity Histogram:
# Create a new plot
par(mfrow=c(1,2)) # This sets up a 1x2 grid for side-by-side histograms
# Histogram for talkergroup_final == 0
hist(summary_behavioral_hist$Sensitivity[summary_behavioral_hist$talkergroup_final == 0],
main = "Histogram for CWNS",
xlab = "Sensitivity",
ylab = "Frequency",
col = "blue",
border = "black",
#xlim = c(0, 100),
#ylim = c(0, 50),
breaks = 12)
# Histogram for talkergroup_final == 1
hist(summary_behavioral_hist$Sensitivity[summary_behavioral_hist$talkergroup_final == 1],
main = "Histogram for CWS",
xlab = "Sensitivity",
ylab = "Frequency",
col = "red", # Use a different color for the second group
border = "black",
#xlim = c(0, 100),
#ylim = c(0, 50),
breaks = 12)
# Reset the plotting to a single plot
par(mfrow=c(1,1))
# Accuracy Histogram:
# Create a new plot
par(mfrow=c(1,2)) # This sets up a 1x2 grid for side-by-side histograms
# Histogram for talkergroup_final == 0
hist(summary_behavioral_hist$Average_accuracy[summary_behavioral_hist$talkergroup_final == 0],
main = "Histogram for CWNS",
xlab = "accuracy",
ylab = "Frequency",
col = "blue",
border = "black",
xlim = c(0, 100),
ylim = c(0, 50),
breaks = 12)
# Histogram for talkergroup_final == 1
hist(summary_behavioral_hist$Average_accuracy[summary_behavioral_hist$talkergroup_final == 1],
main = "Histogram for CWS",
xlab = "accuracy",
ylab = "Frequency",
col = "red", # Use a different color for the second group
border = "black",
xlim = c(0, 100),
ylim = c(0, 50),
breaks = 12)
# Reset the plotting to a single plot
par(mfrow=c(1,1))
# Scatterplot of all accuracy across all participants
scatterplot <- ggplot(summary_behavioral_hist, aes(x = Subject, y = Average_accuracy, color = Trial.Type)) +
geom_point() +
scale_y_continuous(limits = c(0, 100), breaks = seq(0, 100, by = 10))
plot(scatterplot)
# Premature responses Histogram:
# Create a new plot
par(mfrow=c(1,2))
# Histogram for talkergroup_final == 0
hist(summary_behavioral_hist$Average_premature_responses[summary_behavioral_hist$talkergroup_final == 0],
main = "Histogram for CWNS",
xlab = "premature_responses",
ylab = "Frequency",
col = "blue",
border = "black",
xlim = c(0, 50),
ylim = c(0, 150),
breaks = 12)
# Histogram for talkergroup_final == 1
hist(summary_behavioral_hist$Average_premature_responses[summary_behavioral_hist$talkergroup_final == 1],
main = "Histogram for CWS",
xlab = "premature_responses",
ylab = "Frequency",
col = "red", # Use a different color for the second group
border = "black",
xlim = c(0, 50),
ylim = c(0, 150),
breaks = 12)
# Reset the plotting to a single plot
par(mfrow=c(1,1))
# Scatterplot of all accuracy across all participants
scatterplot <- ggplot(summary_behavioral_hist, aes(x = Subject, y = Average_premature_responses, color = Trial.Type)) +
geom_point() +
scale_y_continuous(limits = c(0, 50), breaks = seq(0, 50, by = 10))
plot(scatterplot)
## Warning: Removed 4 rows containing missing values (`geom_point()`).
#Reaction Time Histogram:
# Create a new plot
par(mfrow=c(1,2))
# Histogram for talkergroup_final == 0
hist(summary_behavioral_hist$Average_RT[summary_behavioral_hist$talkergroup_final == 0],
main = "Histogram for CWNS",
xlab = "reaction_time",
ylab = "Frequency",
col = "blue",
border = "black",
xlim = c(100, 2000),
ylim = c(0, 50),
breaks = 12)
# Histogram for talkergroup_final == 1
hist(summary_behavioral_hist$Average_RT[summary_behavioral_hist$talkergroup_final == 1],
main = "Histogram for CWS",
xlab = "reaction_time",
ylab = "Frequency",
col = "red", # Use a different color for the second group
border = "black",
xlim = c(100, 2000),
ylim = c(0, 50),
breaks = 12)
# Reset the plotting to a single plot
par(mfrow=c(1,1))
# Scatterplot of all reaction time across all participants
scatterplot <- ggplot(summary_behavioral_hist, aes(x = Subject, y = Average_RT, color = Trial.Type)) +
geom_point() +
scale_y_continuous(limits = c(200, 2000), breaks = seq(200, 2000, by = 200))
plot(scatterplot)
## Warning: Removed 5 rows containing missing values (`geom_point()`).
# Create the Dataframe to get the behavioral data
summary_behavioral <-
ZOO_FCz_Pz%>%
group_by(talkergroup_final, Emotion, Condition) %>%
summarise(
Average_accuracy = mean(accuracy, na.rm = TRUE),
SEM_accuracy = sd(accuracy, na.rm = TRUE) / sqrt(n()),
Average_hit_falsealarm = mean(hit_falsealarm, na.rm = TRUE),
SEM_hit_falsealarm = sd(hit_falsealarm, na.rm = TRUE) / sqrt(n()),
Average_premature_responses = mean(premature_responses, na.rm = TRUE),
SEM_premature_responses = sd(premature_responses, na.rm = TRUE) / sqrt(n()),
Average_RT_proper = mean(RT_proper, na.rm = TRUE),
SEM_RT_proper = sd(RT_proper, na.rm = TRUE) / sqrt(n()),
Average_RT_premature = mean(RT_premature, na.rm = TRUE),
SEM_RT_premature = sd(RT_premature, na.rm = TRUE) / sqrt(n()),
)
## `summarise()` has grouped output by 'talkergroup_final', 'Emotion'. You can
## override using the `.groups` argument.
# Rename some factors for better visuals:
summary_behavioral$talkergroup_final <- factor(summary_behavioral$talkergroup_final, levels = c(0, 1), labels = c("CWNS", "CWS"))
summary_behavioral$Emotion <- factor(summary_behavioral$Emotion, levels = c("Neg", "Neut"), labels = c("Affective", "Neutral"))
# Create the Dataframe to get the behavioral data
summary_sensitivity <-
ZOO_FCz_Pz%>%
group_by(talkergroup_final, Emotion) %>%
summarise(
Average_sensitivity = mean(sensitivity, na.rm = TRUE),
SEM_sensitivity = sd(sensitivity, na.rm = TRUE) / sqrt(n()),
)
## `summarise()` has grouped output by 'talkergroup_final'. You can override using
## the `.groups` argument.
# Rename some factors for better visuals:
summary_sensitivity$talkergroup_final <- factor(summary_sensitivity$talkergroup_final, levels = c(0, 1), labels = c("CWNS", "CWS"))
summary_sensitivity$Emotion <- factor(summary_sensitivity$Emotion, levels = c("Neg", "Neut"), labels = c("Affective", "Neutral"))
# BAR PLOT FOR SENSITIVITY
bar_plot <- ggplot(summary_sensitivity, aes(x = talkergroup_final, y = Average_sensitivity)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = Average_sensitivity - SEM_sensitivity, ymax = Average_sensitivity + SEM_sensitivity), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "Talkergroup", y = "Sensitivity (d')") +
facet_grid(. ~ Emotion) +
theme_minimal() +
theme(
axis.text = element_text(size = 14, color = "black"),
axis.title = element_text(size = 14),
legend.title =element_blank(),
legend.text = element_text(size = 14),
plot.title = element_text(hjust = 0.5, size = 14, face = "bold", vjust = 2),
strip.text = element_text(size = 14, face = "bold")) +
ggtitle("Sensitivity by Emotion for CWS and CWNS")
bar_plot <- bar_plot + coord_cartesian(ylim = c(1,3))
print(bar_plot)
# Sensitivity takes into account both the hit rate (correctly identifying the target stimulus) and the false alarm rate (incorrectly responding to the non-target stimulus), and provides a more nuanced measure of the participant's performance than accuracy alone.
# Measuring sensitivity using hit rate and false alarm rate in the context of a go nogo task provides information about the individual's ability to distinguish between the target stimulus (go) and the non-target stimulus (nogo). It allows us to assess how well the individual can detect and respond to the relevant stimulus (go), while inhibiting responses to the irrelevant stimulus (nogo). This information can be useful in identifying individuals with attentional deficits or impulsivity, as they may have a higher false alarm rate, indicating difficulty in inhibiting responses to the nogo stimulus. It can also provide insights into the cognitive processes involved in response inhibition and decision-making.
# High Sensitivity: Indicates a high ability to correctly identify Go stimuli (hits) while minimizing false alarms to No-Go stimuli. This is generally desirable as it reflects a high level of accuracy in distinguishing between Go and No-Go trials.
# Low Sensitivity: Suggests a lower ability to differentiate between Go and No-Go stimuli. This could mean more misses (failing to respond to Go stimuli) and/or more false alarms (responding to No-Go stimuli). In the context of a Go/No-Go task, a low sensitivity measure could be indicative of a reduced ability to discriminate between the two types of stimuli.
# High Sensitivity: The participant or model is effective at accurately responding to Go stimuli while correctly refraining from responding to No-Go stimuli.
# Low Sensitivity: The participant or model may struggle to distinguish Go stimuli from No-Go stimuli, leading to more errors, such as misses (failing to respond to Go stimuli) and false alarms (incorrectly responding to No-Go stimuli).
# BAR PLOT FOR ACCURACY
bar_plot <- ggplot(summary_behavioral, aes(x = Emotion, y = Average_accuracy, fill = Condition)) +
geom_bar(stat = "identity", position = "dodge", width = 0.7) +
geom_errorbar(aes(ymin = Average_accuracy - SEM_accuracy, ymax = Average_accuracy + SEM_accuracy), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "Emotion", y = "Average Accuracy (%)") +
facet_grid(. ~ talkergroup_final) +
theme_minimal() +
theme(
axis.text = element_text(size = 14, color = "black"),
axis.title = element_text(size = 14),
legend.title =element_blank(),
legend.text = element_text(size = 14),
plot.title = element_text(hjust = 0.5, size = 14, face = "bold", vjust = 2),
strip.text = element_text(size = 14, face = "bold")) +
scale_fill_manual(values = c("royalblue2", "orangered", "royalblue2", "orangered", "royalblue2", "orangered", "royalblue2", "orangered"))
# ggtitle("Average Accuracy by Emotion and Condition for CWS and CWNS")
bar_plot <- bar_plot + coord_cartesian(ylim = c(50, 100))
print(bar_plot)
bar_plot <- ggplot(summary_behavioral, aes(x = Emotion, y = Average_accuracy, fill = talkergroup_final)) +
geom_bar(stat = "identity", position = "dodge", width = 0.7) +
geom_errorbar(aes(ymin = Average_accuracy - SEM_accuracy, ymax = Average_accuracy + SEM_accuracy), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "Emotion", y = "Average Accuracy (%)") +
facet_grid(. ~ Condition) +
theme_minimal() +
theme(
axis.text = element_text(size = 14, color = "black"),
axis.title = element_text(size = 14),
legend.title =element_blank(),
legend.text = element_text(size = 14),
plot.title = element_text(hjust = 0.5, size = 14, face = "bold", vjust = 2),
strip.text = element_text(size = 14, face = "bold")) +
scale_fill_manual(values = c("royalblue2", "orangered", "royalblue2", "orangered", "royalblue2", "orangered", "royalblue2", "orangered"))
# ggtitle("Average Accuracy by Emotion and Condition for CWS and CWNS")
bar_plot <- bar_plot + coord_cartesian(ylim = c(50, 100))
print(bar_plot)
# BAR PLOT FOR HIT AND FALSE ALARM ONLY FOR NOGO
summary_behavioral_NoGo <- subset(summary_behavioral, Condition == "NoGo")
bar_plot <- ggplot(summary_behavioral_NoGo, aes(x = talkergroup_final, y = Average_hit_falsealarm, fill = Emotion)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = Average_hit_falsealarm - SEM_hit_falsealarm, ymax = Average_hit_falsealarm + SEM_hit_falsealarm), width = 0.2, position = position_dodge(width = 0.9)) +
labs(x = "Emotion", y = "False Alarm Rate - NoGo") +
facet_grid(. ~ Emotion) +
theme_minimal() +
theme(
axis.text = element_text(size = 14, color = "black"),
axis.title = element_text(size = 14),
legend.title =element_blank(),
legend.text = element_text(size = 14),
plot.title = element_text(hjust = 0.5, size = 14, face = "bold", vjust = 2),
strip.text = element_text(size = 14, face = "bold"))
# ggtitle("Average Accuracy by Emotion and Condition for CWS and CWNS")
# bar_plot <- bar_plot + coord_cartesian(ylim = c(650, 850))
print(bar_plot)
# BAR PLOT FOR PREMATURE RESPONSES
bar_plot <- ggplot(summary_behavioral, aes(x = Emotion, y = Average_premature_responses, fill = Condition)) +
geom_bar(stat = "identity", position = "dodge", width = 0.7) +
geom_errorbar(aes(ymin = Average_premature_responses - SEM_premature_responses, ymax = Average_premature_responses + SEM_premature_responses), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "Emotion", y = "Proportion of Premature Responses (%)") +
facet_grid(. ~ talkergroup_final) +
theme_minimal() +
theme(
axis.text = element_text(size = 14, color = "black"),
axis.title = element_text(size = 14),
legend.title =element_blank(),
legend.text = element_text(size = 14),
plot.title = element_text(hjust = 0.5, size = 14, face = "bold", vjust = 2),
strip.text = element_text(size = 14, face = "bold")) +
scale_fill_manual(values = c("royalblue2", "orangered", "royalblue2", "orangered", "royalblue2", "orangered", "royalblue2", "orangered"))
# ggtitle("Average Accuracy by Emotion and Condition for CWS and CWNS")
bar_plot <- bar_plot + coord_cartesian(ylim = c(0, 10))
print(bar_plot)
# BAR PLOT FOR PREMATURE RESPONSES
summary_behavioral$Condition <- factor(summary_behavioral$Condition, levels = c("Go", "NoGo"), labels = c("Go Correct", "NoGo Incorrect"))
bar_plot <- ggplot(summary_behavioral, aes(x = Emotion, y = Average_premature_responses, fill = talkergroup_final)) +
geom_bar(stat = "identity", position = "dodge", width = 0.7) +
geom_errorbar(aes(ymin = Average_premature_responses - SEM_premature_responses, ymax = Average_premature_responses + SEM_premature_responses), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "Emotion", y = "Premature Responses (proportion %)") +
facet_grid(. ~ Condition) +
theme_minimal() +
theme(
axis.text = element_text(size = 14, color = "black"),
axis.title = element_text(size = 14),
legend.title =element_blank(),
legend.text = element_text(size = 14),
plot.title = element_text(hjust = 0.5, size = 14, face = "bold", vjust = 2),
strip.text = element_text(size = 14, face = "bold")) +
scale_fill_manual(values = c("royalblue2", "orangered", "royalblue2", "orangered", "royalblue2", "orangered", "royalblue2", "orangered"))
# ggtitle("Average Premature Responses by Emotion and Condition for CWS and CWNS")
bar_plot <- bar_plot + coord_cartesian(ylim = c(0, 10))
print(bar_plot)
# BAR PLOT FOR premature_responses - GO ONLY
summary_behavioral_Go <- subset(summary_behavioral, Condition == "Go Correct")
bar_plot <- ggplot(summary_behavioral_Go, aes(x = talkergroup_final, y = Average_premature_responses, fill = Emotion)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = Average_premature_responses - SEM_premature_responses, ymax = Average_premature_responses + SEM_premature_responses), width = 0.2, position = position_dodge(width = 0.9)) +
labs(x = "talkergroup_final", y = "Average premature_responses rate % - GO") +
facet_grid(. ~ Emotion) +
theme_minimal() +
theme(
axis.text = element_text(size = 14, color = "black"),
axis.title = element_text(size = 14),
legend.title =element_blank(),
legend.text = element_text(size = 14),
plot.title = element_text(hjust = 0.5, size = 14, face = "bold", vjust = 2),
strip.text = element_text(size = 14, face = "bold"))
# ggtitle("Average Accuracy by Emotion and Condition for CWS and CWNS")
# bar_plot <- bar_plot + coord_cartesian(ylim = c(650, 850))
print(bar_plot)
# BAR PLOT FOR REACTION TIME - GO ONLY
bar_plot <- ggplot(summary_behavioral_Go, aes(x = Emotion, y = Average_RT_proper, fill = talkergroup_final)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = Average_RT_proper - SEM_RT_proper, ymax = Average_RT_proper + SEM_RT_proper), width = 0.2, position = position_dodge(width = 0.9)) +
labs(x = "Emotion", y = "Average Reaction Time (ms) - GO correct") +
facet_grid(. ~ talkergroup_final) +
theme_minimal() +
theme(
axis.text = element_text(size = 14, color = "black"),
axis.title = element_text(size = 14),
legend.title =element_blank(),
legend.text = element_text(size = 14),
plot.title = element_text(hjust = 0.5, size = 14, face = "bold", vjust = 2),
strip.text = element_text(size = 14, face = "bold"))
# ggtitle("Average Accuracy by Emotion and Condition for CWS and CWNS")
bar_plot <- bar_plot + coord_cartesian(ylim = c(650, 850))
print(bar_plot)
# BAR PLOT FOR REACTION TIME - both go and nogo
bar_plot <- ggplot(summary_behavioral, aes(x = Emotion, y = Average_RT_proper, fill = talkergroup_final)) +
geom_bar(stat = "identity", position = "dodge", width = 0.7) +
geom_errorbar(aes(ymin = Average_RT_proper - SEM_RT_proper, ymax = Average_RT_proper + SEM_RT_proper), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "Emotion", y = "Average Reaction Time (ms)") +
facet_grid(. ~ Condition) +
theme_minimal() +
theme(
axis.text = element_text(size = 14, color = "black"),
axis.title = element_text(size = 14),
legend.title =element_blank(),
legend.text = element_text(size = 14),
plot.title = element_text(hjust = 0.5, size = 14, face = "bold", vjust = 2),
strip.text = element_text(size = 14, face = "bold")) +
scale_fill_manual(values = c("royalblue2", "orangered", "royalblue2", "orangered", "royalblue2", "orangered", "royalblue2", "orangered"))
# ggtitle("Average Accuracy by Emotion and Condition for CWS and CWNS")
bar_plot <- bar_plot + coord_cartesian(ylim = c(500, 850))
print(bar_plot)
# BAR PLOT FOR Premature Responses REACTION TIME
bar_plot <- ggplot(summary_behavioral, aes(x = Emotion, y = Average_RT_premature, fill = talkergroup_final)) +
geom_bar(stat = "identity", position = "dodge", width = 0.7) +
geom_errorbar(aes(ymin = Average_RT_premature - SEM_RT_premature, ymax = Average_RT_premature + SEM_RT_premature), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "Emotion", y = "Premature responses - Average Reaction Time (ms)") +
facet_grid(. ~ Condition) +
theme_minimal() +
theme(
axis.text = element_text(size = 14, color = "black"),
axis.title = element_text(size = 14),
legend.title =element_blank(),
legend.text = element_text(size = 14),
plot.title = element_text(hjust = 0.5, size = 14, face = "bold", vjust = 2),
strip.text = element_text(size = 14, face = "bold")) +
scale_fill_manual(values = c("royalblue2", "orangered", "royalblue2", "orangered", "royalblue2", "orangered", "royalblue2", "orangered"))
# ggtitle("Average Accuracy by Emotion and Condition for CWS and CWNS")
# bar_plot <- bar_plot + coord_cartesian(ylim = c(500, 850))
print(bar_plot)
The warning “non-integer counts in a binomial glm!” is because the
glmer() function with family = binomial
expects the response to be counts of successes and failures, which
should be integers. However, in your model, accuracy_prop
and 1 - accuracy_prop are proportions, not counts, so they
are not integers.
# ACCURACY
ZOO_FCz_Pz_NOGO <- subset(ZOO_FCz_Pz, (Condition == "NoGo" & (Emotion == "Neut" | Emotion == "Neg")))
ZOO_FCz_Pz_GO <- subset(ZOO_FCz_Pz, (Condition == "Go" & (Emotion == "Neut" | Emotion == "Neg")))
ZOO_FCz_Pz_NOGO_neut <- subset(ZOO_FCz_Pz, (Condition == "NoGo" & Emotion == "Neut"))
ZOO_FCz_Pz_NOGO_aff <- subset(ZOO_FCz_Pz, (Condition == "NoGo" & Emotion == "Neg"))
ZOO_FCz_Pz_GO_neut <- subset(ZOO_FCz_Pz, (Condition == "Go" & Emotion == "Neut"))
ZOO_FCz_Pz_GO_aff <- subset(ZOO_FCz_Pz, (Condition == "Go" & Emotion == "Neg"))
library(stats)
# Perform the Mann-Whitney U test
# ACCURACY
mwu_result <- wilcox.test(accuracy ~ talkergroup_final, data = ZOO_FCz_Pz_NOGO_neut)
## Warning in wilcox.test.default(x = DATA[[1L]], y = DATA[[2L]], ...): cannot
## compute exact p-value with ties
print(mwu_result)
##
## Wilcoxon rank sum test with continuity correction
##
## data: accuracy by talkergroup_final
## W = 957, p-value = 0.07734
## alternative hypothesis: true location shift is not equal to 0
mwu_result <- wilcox.test(accuracy ~ talkergroup_final, data = ZOO_FCz_Pz_NOGO_aff)
## Warning in wilcox.test.default(x = DATA[[1L]], y = DATA[[2L]], ...): cannot
## compute exact p-value with ties
print(mwu_result)
##
## Wilcoxon rank sum test with continuity correction
##
## data: accuracy by talkergroup_final
## W = 871.5, p-value = 0.355
## alternative hypothesis: true location shift is not equal to 0
mwu_result <- wilcox.test(accuracy ~ talkergroup_final, data = ZOO_FCz_Pz_GO_neut)
## Warning in wilcox.test.default(x = DATA[[1L]], y = DATA[[2L]], ...): cannot
## compute exact p-value with ties
print(mwu_result)
##
## Wilcoxon rank sum test with continuity correction
##
## data: accuracy by talkergroup_final
## W = 866, p-value = 0.3841
## alternative hypothesis: true location shift is not equal to 0
mwu_result <- wilcox.test(accuracy ~ talkergroup_final, data = ZOO_FCz_Pz_GO_aff)
## Warning in wilcox.test.default(x = DATA[[1L]], y = DATA[[2L]], ...): cannot
## compute exact p-value with ties
print(mwu_result)
##
## Wilcoxon rank sum test with continuity correction
##
## data: accuracy by talkergroup_final
## W = 901.5, p-value = 0.2224
## alternative hypothesis: true location shift is not equal to 0
mwu_result <- wilcox.test(hit_falsealarm ~ talkergroup_final, data = ZOO_FCz_Pz_NOGO_neut)
## Warning in wilcox.test.default(x = DATA[[1L]], y = DATA[[2L]], ...): cannot
## compute exact p-value with ties
print(mwu_result)
##
## Wilcoxon rank sum test with continuity correction
##
## data: hit_falsealarm by talkergroup_final
## W = 639.5, p-value = 0.1776
## alternative hypothesis: true location shift is not equal to 0
mwu_result <- wilcox.test(hit_falsealarm ~ talkergroup_final, data = ZOO_FCz_Pz_NOGO_aff)
## Warning in wilcox.test.default(x = DATA[[1L]], y = DATA[[2L]], ...): cannot
## compute exact p-value with ties
print(mwu_result)
##
## Wilcoxon rank sum test with continuity correction
##
## data: hit_falsealarm by talkergroup_final
## W = 681, p-value = 0.3471
## alternative hypothesis: true location shift is not equal to 0
model1 <- lmer(accuracy ~ talkergroup_final*Emotion*Condition +
calculator_gender_cve + calculator_age_cve + (1|Subject), data = ZOO_FCz_Pz, REML = TRUE)
anova(model1)
## Type III Analysis of Variance Table with Satterthwaite's method
## Sum Sq Mean Sq NumDF DenDF F value Pr(>F)
## talkergroup_final 265.3 265.3 1 75 2.7769 0.09981
## Emotion 80.4 80.4 1 231 0.8416 0.35988
## Condition 9813.9 9813.9 1 231 102.7076 < 2e-16
## calculator_gender_cve 77.4 77.4 1 75 0.8103 0.37090
## calculator_age_cve 614.0 614.0 1 75 6.4262 0.01333
## talkergroup_final:Emotion 0.5 0.5 1 231 0.0050 0.94364
## talkergroup_final:Condition 127.1 127.1 1 231 1.3298 0.25004
## Emotion:Condition 37.9 37.9 1 231 0.3969 0.52932
## talkergroup_final:Emotion:Condition 78.7 78.7 1 231 0.8239 0.36498
##
## talkergroup_final .
## Emotion
## Condition ***
## calculator_gender_cve
## calculator_age_cve *
## talkergroup_final:Emotion
## talkergroup_final:Condition
## Emotion:Condition
## talkergroup_final:Emotion:Condition
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
plot_model(model1, type="pred", terms = c("Emotion" , "Condition", "talkergroup_final"))
# Premature Responses
mwu_result <- wilcox.test(premature_responses ~ talkergroup_final, data = ZOO_FCz_Pz_NOGO_neut)
## Warning in wilcox.test.default(x = DATA[[1L]], y = DATA[[2L]], ...): cannot
## compute exact p-value with ties
print(mwu_result)
##
## Wilcoxon rank sum test with continuity correction
##
## data: premature_responses by talkergroup_final
## W = 519, p-value = 0.003733
## alternative hypothesis: true location shift is not equal to 0
mwu_result <- wilcox.test(premature_responses ~ talkergroup_final, data = ZOO_FCz_Pz_NOGO_aff)
## Warning in wilcox.test.default(x = DATA[[1L]], y = DATA[[2L]], ...): cannot
## compute exact p-value with ties
print(mwu_result)
##
## Wilcoxon rank sum test with continuity correction
##
## data: premature_responses by talkergroup_final
## W = 711, p-value = 0.6032
## alternative hypothesis: true location shift is not equal to 0
mwu_result <- wilcox.test(premature_responses ~ talkergroup_final, data = ZOO_FCz_Pz_GO_neut)
## Warning in wilcox.test.default(x = DATA[[1L]], y = DATA[[2L]], ...): cannot
## compute exact p-value with ties
print(mwu_result)
##
## Wilcoxon rank sum test with continuity correction
##
## data: premature_responses by talkergroup_final
## W = 525.5, p-value = 0.01688
## alternative hypothesis: true location shift is not equal to 0
mwu_result <- wilcox.test(premature_responses ~ talkergroup_final, data = ZOO_FCz_Pz_GO_aff)
## Warning in wilcox.test.default(x = DATA[[1L]], y = DATA[[2L]], ...): cannot
## compute exact p-value with ties
print(mwu_result)
##
## Wilcoxon rank sum test with continuity correction
##
## data: premature_responses by talkergroup_final
## W = 689.5, p-value = 0.477
## alternative hypothesis: true location shift is not equal to 0
model1 <- lmer(premature_responses ~ talkergroup_final*Emotion*Condition +
calculator_gender_cve + calculator_age_cve + (1|Subject), data = ZOO_FCz_Pz, REML = TRUE)
anova(model1)
## Type III Analysis of Variance Table with Satterthwaite's method
## Sum Sq Mean Sq NumDF DenDF F value Pr(>F)
## talkergroup_final 94.584 94.584 1 74 4.3914 0.03954
## Emotion 76.008 76.008 1 228 3.5290 0.06158
## Condition 122.022 122.022 1 228 5.6654 0.01813
## calculator_gender_cve 1.611 1.611 1 74 0.0748 0.78523
## calculator_age_cve 224.155 224.155 1 74 10.4073 0.00187
## talkergroup_final:Emotion 74.627 74.627 1 228 3.4649 0.06397
## talkergroup_final:Condition 113.638 113.638 1 228 5.2761 0.02253
## Emotion:Condition 88.752 88.752 1 228 4.1207 0.04352
## talkergroup_final:Emotion:Condition 59.241 59.241 1 228 2.7505 0.09860
##
## talkergroup_final *
## Emotion .
## Condition *
## calculator_gender_cve
## calculator_age_cve **
## talkergroup_final:Emotion .
## talkergroup_final:Condition *
## Emotion:Condition *
## talkergroup_final:Emotion:Condition .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
plot_model(model1, type="pred", terms = c("Emotion" , "Condition", "talkergroup_final"))
emmeans_result <- emmeans(model1, pairwise ~ talkergroup_final | Condition) # Tukey is default
## NOTE: Results may be misleading due to involvement in interactions
pairs(emmeans_result, adjust= "none", simple = "each") # adjust = "bonferroni"
## $`simple contrasts for talkergroup_final`
## Condition = Go:
## contrast estimate SE df t.ratio p.value
## talkergroup_final0 - talkergroup_final1 -0.483 0.965 142 -0.501 0.6174
##
## Condition = NoGo:
## contrast estimate SE df t.ratio p.value
## talkergroup_final0 - talkergroup_final1 -2.904 0.965 142 -3.010 0.0031
##
## Results are averaged over the levels of: Emotion, calculator_gender_cve
## Degrees-of-freedom method: kenward-roger
##
## $`simple contrasts for Condition`
## talkergroup_final = 0:
## contrast estimate SE df t.ratio p.value
## Go - NoGo -1.70 0.716 228 -2.380 0.0181
##
## talkergroup_final = 1:
## contrast estimate SE df t.ratio p.value
## Go - NoGo -4.13 0.773 228 -5.334 <.0001
##
## Results are averaged over the levels of: Emotion, calculator_gender_cve
## Degrees-of-freedom method: kenward-roger
model1 <- lmer(premature_responses ~ talkergroup_final*Emotion +
calculator_gender_cve + calculator_age_cve + (1|Subject), data = ZOO_FCz_Pz_GO, REML = TRUE)
anova(model1)
## Type III Analysis of Variance Table with Satterthwaite's method
## Sum Sq Mean Sq NumDF DenDF F value Pr(>F)
## talkergroup_final 1.4341 1.4341 1 74 0.6152 0.4353
## Emotion 0.2468 0.2468 1 76 0.1059 0.7458
## calculator_gender_cve 5.2255 5.2255 1 74 2.2417 0.1386
## calculator_age_cve 10.5062 10.5062 1 74 4.5071 0.0371 *
## talkergroup_final:Emotion 0.4436 0.4436 1 76 0.1903 0.6639
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
plot_model(model1, type="pred", terms = c("Emotion" , "talkergroup_final"))
emmeans_result <- emmeans(model1, pairwise ~ talkergroup_final | Emotion) # Tukey is default
pairs(emmeans_result, adjust= "none", simple = "each") # adjust = "bonferroni"
## $`simple contrasts for talkergroup_final`
## Emotion = Neg:
## contrast estimate SE df t.ratio p.value
## talkergroup_final0 - talkergroup_final1 -0.295 0.568 106 -0.519 0.6047
##
## Emotion = Neut:
## contrast estimate SE df t.ratio p.value
## talkergroup_final0 - talkergroup_final1 -0.509 0.568 106 -0.896 0.3724
##
## Results are averaged over the levels of: calculator_gender_cve
## Degrees-of-freedom method: kenward-roger
##
## $`simple contrasts for Emotion`
## talkergroup_final = 0:
## contrast estimate SE df t.ratio p.value
## Neg - Neut -0.108 0.333 76 -0.325 0.7458
##
## talkergroup_final = 1:
## contrast estimate SE df t.ratio p.value
## Neg - Neut -0.322 0.360 76 -0.896 0.3732
##
## Results are averaged over the levels of: calculator_gender_cve
## Degrees-of-freedom method: kenward-roger
model1 <- lmer(premature_responses ~ talkergroup_final*Emotion +
calculator_gender_cve + calculator_age_cve + (1|Subject), data = ZOO_FCz_Pz_NOGO, REML = TRUE)
anova(model1)
## Type III Analysis of Variance Table with Satterthwaite's method
## Sum Sq Mean Sq NumDF DenDF F value Pr(>F)
## talkergroup_final 226.15 226.15 1 74 5.6788 0.019740 *
## Emotion 164.51 164.51 1 76 4.1310 0.045597 *
## calculator_gender_cve 2.68 2.68 1 74 0.0673 0.795960
## calculator_age_cve 432.12 432.12 1 74 10.8509 0.001517 **
## talkergroup_final:Emotion 133.42 133.42 1 76 3.3504 0.071110 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
plot_model(model1, type="pred", terms = c("Emotion" , "talkergroup_final"))
emmeans_result <- emmeans(model1, pairwise ~ talkergroup_final | Emotion) # Tukey is default
pairs(emmeans_result, adjust= "none", simple = "each") # adjust = "bonferroni"
## $`simple contrasts for talkergroup_final`
## Emotion = Neg:
## contrast estimate SE df t.ratio p.value
## talkergroup_final0 - talkergroup_final1 -1.13 1.61 143 -0.701 0.4842
##
## Emotion = Neut:
## contrast estimate SE df t.ratio p.value
## talkergroup_final0 - talkergroup_final1 -4.84 1.61 143 -3.004 0.0031
##
## Results are averaged over the levels of: calculator_gender_cve
## Degrees-of-freedom method: kenward-roger
##
## $`simple contrasts for Emotion`
## talkergroup_final = 0:
## contrast estimate SE df t.ratio p.value
## Neg - Neut 2.799 1.38 76 2.032 0.0456
##
## talkergroup_final = 1:
## contrast estimate SE df t.ratio p.value
## Neg - Neut -0.911 1.49 76 -0.613 0.5419
##
## Results are averaged over the levels of: calculator_gender_cve
## Degrees-of-freedom method: kenward-roger
# Sensitivity - normally distributed
# Perform a two-sample t-test
t_test_result <- t.test(sensitivity ~ talkergroup_final, data = ZOO_FCz_Pz_GO_neut, alternative = "two.sided")
print(t_test_result)
##
## Welch Two Sample t-test
##
## data: sensitivity by talkergroup_final
## t = 1.8475, df = 73.986, p-value = 0.06868
## alternative hypothesis: true difference in means between group 0 and group 1 is not equal to 0
## 95 percent confidence interval:
## -0.03989745 1.05594113
## sample estimates:
## mean in group 0 mean in group 1
## 2.764118 2.256096
t_test_result <- t.test(sensitivity ~ talkergroup_final, data = ZOO_FCz_Pz_GO_aff, alternative = "two.sided")
print(t_test_result)
##
## Welch Two Sample t-test
##
## data: sensitivity by talkergroup_final
## t = 1.1276, df = 76.929, p-value = 0.263
## alternative hypothesis: true difference in means between group 0 and group 1 is not equal to 0
## 95 percent confidence interval:
## -0.2233266 0.8064415
## sample estimates:
## mean in group 0 mean in group 1
## 2.781101 2.489543
# Lm with covariate, when age is taken into account sensitivity is significant <.05.
lm_result <- lm(sensitivity ~ talkergroup_final + calculator_age_cve + calculator_gender_cve, data = ZOO_FCz_Pz_GO_neut)
summary(lm_result)
##
## Call:
## lm(formula = sensitivity ~ talkergroup_final + calculator_age_cve +
## calculator_gender_cve, data = ZOO_FCz_Pz_GO_neut)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2.3925 -0.5937 -0.1467 0.4411 4.2369
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.63969 0.64030 0.999 0.32098
## talkergroup_final -0.55099 0.26461 -2.082 0.04073 *
## calculator_age_cve 0.03455 0.01040 3.322 0.00138 **
## calculator_gender_cve 0.25195 0.26652 0.945 0.34753
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 1.172 on 75 degrees of freedom
## Multiple R-squared: 0.173, Adjusted R-squared: 0.1399
## F-statistic: 5.228 on 3 and 75 DF, p-value: 0.002482
lm_result <- lm(sensitivity ~ talkergroup_final + calculator_age_cve + calculator_gender_cve, data = ZOO_FCz_Pz_GO_aff)
summary(lm_result)
##
## Call:
## lm(formula = sensitivity ~ talkergroup_final + calculator_age_cve +
## calculator_gender_cve, data = ZOO_FCz_Pz_GO_aff)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2.1443 -0.5351 -0.1700 0.2883 3.8903
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.990007 0.600591 1.648 0.10346
## talkergroup_final -0.321778 0.248199 -1.296 0.19879
## calculator_age_cve 0.030623 0.009756 3.139 0.00242 **
## calculator_gender_cve 0.055914 0.249994 0.224 0.82363
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 1.099 on 75 degrees of freedom
## Multiple R-squared: 0.1309, Adjusted R-squared: 0.09618
## F-statistic: 3.767 on 3 and 75 DF, p-value: 0.01413
#GLOBAL MODEL - SENSITIVITY
model1 <- lmer(sensitivity ~ talkergroup_final*Emotion + calculator_gender_cve + calculator_age_cve + (1| Subject), data = ZOO_FCz_Pz_NOGO)
anova(model1)
## Type III Analysis of Variance Table with Satterthwaite's method
## Sum Sq Mean Sq NumDF DenDF F value Pr(>F)
## talkergroup_final 3.7704 3.7704 1 75 4.3832 0.0396742 *
## Emotion 0.0061 0.0061 1 77 0.0070 0.9333462
## calculator_gender_cve 0.4624 0.4624 1 75 0.5376 0.4657176
## calculator_age_cve 13.6094 13.6094 1 75 15.8215 0.0001591 ***
## talkergroup_final:Emotion 0.4609 0.4609 1 77 0.5358 0.4664146
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
pairwise_comp <- emmeans(model1, pairwise ~ talkergroup_final | Emotion) # Tukey is default
pairs(pairwise_comp, adjust= "none", simple = "each") # adjust = "bonferroni"
## $`simple contrasts for talkergroup_final`
## Emotion = Neg:
## contrast estimate SE df t.ratio p.value
## talkergroup_final0 - talkergroup_final1 0.328 0.256 136 1.284 0.2013
##
## Emotion = Neut:
## contrast estimate SE df t.ratio p.value
## talkergroup_final0 - talkergroup_final1 0.545 0.256 136 2.131 0.0349
##
## Results are averaged over the levels of: calculator_gender_cve
## Degrees-of-freedom method: kenward-roger
##
## $`simple contrasts for Emotion`
## talkergroup_final = 0:
## contrast estimate SE df t.ratio p.value
## Neg - Neut 0.017 0.202 77 0.084 0.9333
##
## talkergroup_final = 1:
## contrast estimate SE df t.ratio p.value
## Neg - Neut 0.233 0.216 77 1.083 0.2824
##
## Results are averaged over the levels of: calculator_gender_cve
## Degrees-of-freedom method: kenward-roger
# Create a new model using aov()
model_aov <- aov(sensitivity ~ talkergroup_final * Emotion + calculator_gender_cve + calculator_age_cve, data = ZOO_FCz_Pz_NOGO)
summary(model_aov)
## Df Sum Sq Mean Sq F value Pr(>F)
## talkergroup_final 1 6.29 6.288 4.927 0.0279 *
## Emotion 1 0.55 0.553 0.434 0.5112
## calculator_gender_cve 1 1.00 1.004 0.787 0.3764
## calculator_age_cve 1 26.95 26.949 21.117 9.02e-06 ***
## talkergroup_final:Emotion 1 0.46 0.461 0.361 0.5488
## Residuals 152 193.98 1.276
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# check for assumptions:
# Linearity and Homoscedasticity:
# If the relationship is linear, there should be no pattern to the residuals
# The spread of the residuals should be constant across all levels of the fitted values.
# create residuals vs fitted values plot
plot(resid(model1) ~ fitted(model1), ylab="Residuals", xlab="Fitted values")
# add a regression line
abline(lm(resid(model1) ~ fitted(model1)), col="red")
#The Breusch-Pagan test or the White test can be used to statistically test for constant variance of the residuals (homoscedasticity)
library(lmtest)
## Loading required package: zoo
##
## Attaching package: 'zoo'
## The following objects are masked from 'package:base':
##
## as.Date, as.Date.numeric
bptest(residuals(model1) ~ fitted(model1))
##
## studentized Breusch-Pagan test
##
## data: residuals(model1) ~ fitted(model1)
## BP = 51.522, df = 1, p-value = 7.078e-13
# Normality: The points should fall along a straight line.
qqnorm(resid(model1))
qqline(resid(model1))
# a statistical test for normality, such as the Shapiro-Wilk test
shapiro.test(resid(model1))
##
## Shapiro-Wilk normality test
##
## data: resid(model1)
## W = 0.86256, p-value = 7.62e-11
# No multicollinearity: You can check this assumption by calculating the variance inflation factors (VIF) for the predictors.
# A VIF greater than 5 or 10 indicates high multicollinearity.
library(car)
##
## Attaching package: 'car'
## The following object is masked from 'package:dplyr':
##
## recode
vif(model1)
## talkergroup_final Emotion calculator_gender_cve
## 1.508483 1.880952 1.002322
## calculator_age_cve talkergroup_final:Emotion
## 1.001335 2.385960
# Reaction Time - normally distributed
lm_result <- lm(RT_proper ~ talkergroup_final + calculator_age_cve + calculator_gender_cve, data = ZOO_FCz_Pz_GO_neut)
summary(lm_result)
##
## Call:
## lm(formula = RT_proper ~ talkergroup_final + calculator_age_cve +
## calculator_gender_cve, data = ZOO_FCz_Pz_GO_neut)
##
## Residuals:
## Min 1Q Median 3Q Max
## -176.17 -67.21 -11.64 45.36 252.77
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 1109.9842 53.0158 20.937 < 2e-16 ***
## talkergroup_final 30.9944 22.0594 1.405 0.164
## calculator_age_cve -6.1768 0.8619 -7.167 4.8e-10 ***
## calculator_gender_cve -12.5490 22.1611 -0.566 0.573
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 96.98 on 74 degrees of freedom
## (1 observation deleted due to missingness)
## Multiple R-squared: 0.4173, Adjusted R-squared: 0.3937
## F-statistic: 17.67 on 3 and 74 DF, p-value: 9.552e-09
lm_result <- lm(RT_proper ~ talkergroup_final + calculator_age_cve + calculator_gender_cve, data = ZOO_FCz_Pz_GO_aff)
summary(lm_result)
##
## Call:
## lm(formula = RT_proper ~ talkergroup_final + calculator_age_cve +
## calculator_gender_cve, data = ZOO_FCz_Pz_GO_aff)
##
## Residuals:
## Min 1Q Median 3Q Max
## -180.92 -67.13 -13.00 52.92 282.93
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 1123.4148 55.2714 20.325 < 2e-16 ***
## talkergroup_final 34.5201 22.9979 1.501 0.138
## calculator_age_cve -5.8577 0.8985 -6.519 7.64e-09 ***
## calculator_gender_cve -37.8747 23.1039 -1.639 0.105
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 101.1 on 74 degrees of freedom
## (1 observation deleted due to missingness)
## Multiple R-squared: 0.3877, Adjusted R-squared: 0.3629
## F-statistic: 15.62 on 3 and 74 DF, p-value: 5.767e-08
lm_result <- lm(RT_proper ~ talkergroup_final + calculator_age_cve + calculator_gender_cve, data = ZOO_FCz_Pz_NOGO_neut)
summary(lm_result)
##
## Call:
## lm(formula = RT_proper ~ talkergroup_final + calculator_age_cve +
## calculator_gender_cve, data = ZOO_FCz_Pz_NOGO_neut)
##
## Residuals:
## Min 1Q Median 3Q Max
## -360.71 -128.48 -32.76 93.89 795.34
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 1233.027 108.340 11.381 < 2e-16 ***
## talkergroup_final 85.843 45.312 1.895 0.0621 .
## calculator_age_cve -10.482 1.759 -5.957 8.31e-08 ***
## calculator_gender_cve 25.896 45.410 0.570 0.5702
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 197.8 on 73 degrees of freedom
## (2 observations deleted due to missingness)
## Multiple R-squared: 0.3462, Adjusted R-squared: 0.3193
## F-statistic: 12.89 on 3 and 73 DF, p-value: 7.616e-07
lm_result <- lm(RT_proper ~ talkergroup_final + calculator_age_cve + calculator_gender_cve, data = ZOO_FCz_Pz_NOGO_aff)
summary(lm_result)
##
## Call:
## lm(formula = RT_proper ~ talkergroup_final + calculator_age_cve +
## calculator_gender_cve, data = ZOO_FCz_Pz_NOGO_aff)
##
## Residuals:
## Min 1Q Median 3Q Max
## -312.18 -124.29 -17.79 56.72 770.92
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 1070.845 97.882 10.940 < 2e-16 ***
## talkergroup_final 74.919 40.728 1.840 0.06985 .
## calculator_age_cve -7.023 1.591 -4.414 3.4e-05 ***
## calculator_gender_cve -109.878 40.916 -2.685 0.00894 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 179.1 on 74 degrees of freedom
## (1 observation deleted due to missingness)
## Multiple R-squared: 0.2847, Adjusted R-squared: 0.2557
## F-statistic: 9.819 on 3 and 74 DF, p-value: 1.575e-05
model1 <- lmer(RT_proper ~ talkergroup_final*Emotion*Condition +
calculator_gender_cve + calculator_age_cve + (1|Subject), data = ZOO_FCz_Pz, REML = TRUE)
anova(model1)
## Type III Analysis of Variance Table with Satterthwaite's method
## Sum Sq Mean Sq NumDF DenDF F value
## talkergroup_final 74636 74636 1 74.166 4.9974
## Emotion 4110 4110 1 227.173 0.2752
## Condition 723048 723048 1 227.173 48.4130
## calculator_gender_cve 26239 26239 1 74.105 1.7569
## calculator_age_cve 836229 836229 1 74.014 55.9913
## talkergroup_final:Emotion 389 389 1 227.364 0.0260
## talkergroup_final:Condition 37456 37456 1 227.364 2.5079
## Emotion:Condition 32462 32462 1 227.173 2.1736
## talkergroup_final:Emotion:Condition 1068 1068 1 227.364 0.0715
## Pr(>F)
## talkergroup_final 0.02839 *
## Emotion 0.60039
## Condition 3.652e-11 ***
## calculator_gender_cve 0.18909
## calculator_age_cve 1.224e-10 ***
## talkergroup_final:Emotion 0.87197
## talkergroup_final:Condition 0.11466
## Emotion:Condition 0.14178
## talkergroup_final:Emotion:Condition 0.78937
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
plot_model(model1, type="pred", terms = c("Emotion" , "talkergroup_final"))
model1 <- lmer(RT_proper ~ talkergroup_final*Emotion +
calculator_gender_cve + calculator_age_cve + (1|Subject), data = ZOO_FCz_Pz_GO, REML = TRUE)
anova(model1)
## Type III Analysis of Variance Table with Satterthwaite's method
## Sum Sq Mean Sq NumDF DenDF F value Pr(>F)
## talkergroup_final 7579 7579 1 74 2.4982 0.1182
## Emotion 6735 6735 1 76 2.2203 0.1403
## calculator_gender_cve 4448 4448 1 74 1.4663 0.2298
## calculator_age_cve 167523 167523 1 74 55.2224 1.532e-10 ***
## talkergroup_final:Emotion 84 84 1 76 0.0278 0.8679
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
plot_model(model1, type="pred", terms = c("Emotion" , "talkergroup_final"))
model1 <- lmer(RT_proper ~ talkergroup_final*Emotion +
calculator_gender_cve + calculator_age_cve + (1|Subject), data = ZOO_FCz_Pz_NOGO, REML = TRUE)
anova(model1)
## Type III Analysis of Variance Table with Satterthwaite's method
## Sum Sq Mean Sq NumDF DenDF F value Pr(>F)
## talkergroup_final 115728 115728 1 73.833 4.8261 0.03117 *
## Emotion 29837 29837 1 75.020 1.2442 0.26822
## calculator_gender_cve 33963 33963 1 73.675 1.4163 0.23783
## calculator_age_cve 935452 935452 1 73.435 39.0101 2.472e-08 ***
## talkergroup_final:Emotion 873 873 1 75.522 0.0364 0.84919
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
plot_model(model1, type="pred", terms = c("Emotion" , "talkergroup_final"))
# Create the Dataframe to get the brief data
summary_brief_x <-
ZOO_FCz_Pz%>%
group_by(Subject) %>%
filter(!is.na(inhibit)) %>%
summarise(
age = mean(calculator_age_cve, na.rm = TRUE),
talkergroup_final = mean(talkergroup_final, na.rm = TRUE),
inhibit = mean(inhibit, na.rm = TRUE),
shift = mean(shift, na.rm = TRUE),
emotionalCntrl = mean(emotionalCntrl, na.rm = TRUE),
workingMemory = mean(workingMemory, na.rm = TRUE),
planOrganize = mean(planOrganize, na.rm = TRUE),
BehavioralRegulationIndex_BRI = mean(BehavioralRegulationIndex_BRI, na.rm = TRUE),
MetacognitionIndex_MI = mean(MetacognitionIndex_MI, na.rm = TRUE),
GlobalExecutiveComposite_GEC = mean(GlobalExecutiveComposite_GEC, na.rm = TRUE)
)
summary_brief <-
summary_brief_x%>%
group_by(talkergroup_final) %>%
summarise(
age_mean = mean(age, na.rm = TRUE),
inhibit_mean = mean(inhibit, na.rm = TRUE),
SEM_inhibit_mean= sd(inhibit, na.rm = TRUE) / sqrt(n()),
shift_mean = mean(shift, na.rm = TRUE),
SEM_shift_mean = sd(shift, na.rm = TRUE) / sqrt(n()),
emotionalCntrl_mean = mean(emotionalCntrl, na.rm = TRUE),
SEM_emotionalCntrl_mean = sd(emotionalCntrl, na.rm = TRUE) / sqrt(n()),
workingMemory_mean = mean(workingMemory, na.rm = TRUE),
SEM_workingMemory_mean = sd(workingMemory, na.rm = TRUE) / sqrt(n()),
planOrganize_mean = mean(planOrganize, na.rm = TRUE),
SEM_planOrganize_mean = sd(planOrganize, na.rm = TRUE) / sqrt(n()),
BehavioralRegulationIndex_BRI_mean = mean(BehavioralRegulationIndex_BRI, na.rm = TRUE),
SEM_BehavioralRegulationIndex_BRI_mean = sd(BehavioralRegulationIndex_BRI, na.rm = TRUE) / sqrt(n()),
MetacognitionIndex_MI_mean = mean(MetacognitionIndex_MI, na.rm = TRUE),
SEM_MetacognitionIndex_MI_mean = sd(MetacognitionIndex_MI, na.rm = TRUE) / sqrt(n()),
GlobalExecutiveComposite_GEC_mean = mean(GlobalExecutiveComposite_GEC, na.rm = TRUE),
SEM_GlobalExecutiveComposite_GEC_mean = sd(GlobalExecutiveComposite_GEC, na.rm = TRUE) / sqrt(n())
)
summary_brief_young <-
summary_brief_x%>%
filter(age < 55 ) %>%
group_by(talkergroup_final) %>%
summarise(
age_mean = mean(age, na.rm = TRUE),
inhibit_mean = mean(inhibit, na.rm = TRUE),
SEM_inhibit_mean= sd(inhibit, na.rm = TRUE) / sqrt(n()),
shift_mean = mean(shift, na.rm = TRUE),
SEM_shift_mean = sd(shift, na.rm = TRUE) / sqrt(n()),
emotionalCntrl_mean = mean(emotionalCntrl, na.rm = TRUE),
SEM_emotionalCntrl_mean = sd(emotionalCntrl, na.rm = TRUE) / sqrt(n()),
workingMemory_mean = mean(workingMemory, na.rm = TRUE),
SEM_workingMemory_mean = sd(workingMemory, na.rm = TRUE) / sqrt(n()),
planOrganize_mean = mean(planOrganize, na.rm = TRUE),
SEM_planOrganize_mean = sd(planOrganize, na.rm = TRUE) / sqrt(n()),
BehavioralRegulationIndex_BRI_mean = mean(BehavioralRegulationIndex_BRI, na.rm = TRUE),
SEM_BehavioralRegulationIndex_BRI_mean = sd(BehavioralRegulationIndex_BRI, na.rm = TRUE) / sqrt(n()),
MetacognitionIndex_MI_mean = mean(MetacognitionIndex_MI, na.rm = TRUE),
SEM_MetacognitionIndex_MI_mean = sd(MetacognitionIndex_MI, na.rm = TRUE) / sqrt(n()),
GlobalExecutiveComposite_GEC_mean = mean(GlobalExecutiveComposite_GEC, na.rm = TRUE),
SEM_GlobalExecutiveComposite_GEC_mean = sd(GlobalExecutiveComposite_GEC, na.rm = TRUE) / sqrt(n())
)
summary_brief_old <-
summary_brief_x%>%
filter(age > 55 ) %>%
group_by(talkergroup_final) %>%
summarise(
age_mean = mean(age, na.rm = TRUE),
inhibit_mean = mean(inhibit, na.rm = TRUE),
SEM_inhibit_mean= sd(inhibit, na.rm = TRUE) / sqrt(n()),
shift_mean = mean(shift, na.rm = TRUE),
SEM_shift_mean = sd(shift, na.rm = TRUE) / sqrt(n()),
emotionalCntrl_mean = mean(emotionalCntrl, na.rm = TRUE),
SEM_emotionalCntrl_mean = sd(emotionalCntrl, na.rm = TRUE) / sqrt(n()),
workingMemory_mean = mean(workingMemory, na.rm = TRUE),
SEM_workingMemory_mean = sd(workingMemory, na.rm = TRUE) / sqrt(n()),
planOrganize_mean = mean(planOrganize, na.rm = TRUE),
SEM_planOrganize_mean = sd(planOrganize, na.rm = TRUE) / sqrt(n()),
BehavioralRegulationIndex_BRI_mean = mean(BehavioralRegulationIndex_BRI, na.rm = TRUE),
SEM_BehavioralRegulationIndex_BRI_mean = sd(BehavioralRegulationIndex_BRI, na.rm = TRUE) / sqrt(n()),
MetacognitionIndex_MI_mean = mean(MetacognitionIndex_MI, na.rm = TRUE),
SEM_MetacognitionIndex_MI_mean = sd(MetacognitionIndex_MI, na.rm = TRUE) / sqrt(n()),
GlobalExecutiveComposite_GEC_mean = mean(GlobalExecutiveComposite_GEC, na.rm = TRUE),
SEM_GlobalExecutiveComposite_GEC_mean = sd(GlobalExecutiveComposite_GEC, na.rm = TRUE) / sqrt(n())
)
# Rename some factors for better visuals:
summary_brief$talkergroup_final <- factor(summary_brief$talkergroup_final, levels = c(0, 1), labels = c("CWNS", "CWS"))
summary_brief_young$talkergroup_final <- factor(summary_brief_young$talkergroup_final, levels = c(0, 1), labels = c("CWNS", "CWS"))
summary_brief_old$talkergroup_final <- factor(summary_brief_old$talkergroup_final, levels = c(0, 1), labels = c("CWNS", "CWS"))
# BAR PLOTS
library(gridExtra)
##
## Attaching package: 'gridExtra'
## The following object is masked from 'package:dplyr':
##
## combine
bar_plot1 <- ggplot(summary_brief, aes(x = talkergroup_final, y = inhibit_mean)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = inhibit_mean - SEM_inhibit_mean, ymax = inhibit_mean + SEM_inhibit_mean), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "TalkerGroup", y = "Inhibit") +
theme_minimal()
bar_plot2 <- ggplot(summary_brief, aes(x = talkergroup_final, y = shift_mean)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = shift_mean - SEM_shift_mean, ymax = shift_mean + SEM_shift_mean), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "TalkerGroup", y = "shift") +
theme_minimal()
bar_plot3 <- ggplot(summary_brief, aes(x = talkergroup_final, y = emotionalCntrl_mean)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = emotionalCntrl_mean - SEM_emotionalCntrl_mean, ymax = emotionalCntrl_mean + SEM_emotionalCntrl_mean), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "TalkerGroup", y = "emotionalCntrl") +
theme_minimal()
bar_plot4 <- ggplot(summary_brief, aes(x = talkergroup_final, y = workingMemory_mean)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = workingMemory_mean - SEM_workingMemory_mean, ymax = workingMemory_mean + SEM_workingMemory_mean), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "TalkerGroup", y = "workingMemory") +
theme_minimal()
bar_plot5 <- ggplot(summary_brief, aes(x = talkergroup_final, y = planOrganize_mean)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = planOrganize_mean - SEM_planOrganize_mean, ymax = planOrganize_mean + SEM_planOrganize_mean), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "TalkerGroup", y = "planOrganize") +
theme_minimal()
bar_plot6 <- ggplot(summary_brief, aes(x = talkergroup_final, y = BehavioralRegulationIndex_BRI_mean)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = BehavioralRegulationIndex_BRI_mean - SEM_BehavioralRegulationIndex_BRI_mean, ymax = BehavioralRegulationIndex_BRI_mean + SEM_BehavioralRegulationIndex_BRI_mean), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "TalkerGroup", y = "BehavioralRegulationIndex") +
theme_minimal()
bar_plot7 <- ggplot(summary_brief, aes(x = talkergroup_final, y = MetacognitionIndex_MI_mean)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = MetacognitionIndex_MI_mean - SEM_MetacognitionIndex_MI_mean, ymax = MetacognitionIndex_MI_mean + SEM_MetacognitionIndex_MI_mean), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "TalkerGroup", y = "MetacognitionIndex") +
theme_minimal()
bar_plot8 <- ggplot(summary_brief, aes(x = talkergroup_final, y = GlobalExecutiveComposite_GEC_mean)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = GlobalExecutiveComposite_GEC_mean - SEM_GlobalExecutiveComposite_GEC_mean, ymax = GlobalExecutiveComposite_GEC_mean + SEM_GlobalExecutiveComposite_GEC_mean), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "TalkerGroup", y = "GlobalExecutiveComposite") +
theme_minimal()
# arrange the plots in a grid
grid.arrange(bar_plot1, bar_plot2, bar_plot3, bar_plot4, bar_plot5, bar_plot6, bar_plot7, bar_plot8, ncol = 4, nrow = 2)
bar_plot1 <- ggplot(summary_brief_young, aes(x = talkergroup_final, y = inhibit_mean)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = inhibit_mean - SEM_inhibit_mean, ymax = inhibit_mean + SEM_inhibit_mean), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "TalkerGroup", y = "Inhibit") +
theme_minimal() + coord_cartesian(ylim = c(0, 30))
bar_plot2 <- ggplot(summary_brief_young, aes(x = talkergroup_final, y = shift_mean)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = shift_mean - SEM_shift_mean, ymax = shift_mean + SEM_shift_mean), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "TalkerGroup", y = "shift") +
theme_minimal() + coord_cartesian(ylim = c(0, 15))
bar_plot3 <- ggplot(summary_brief_young, aes(x = talkergroup_final, y = emotionalCntrl_mean)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = emotionalCntrl_mean - SEM_emotionalCntrl_mean, ymax = emotionalCntrl_mean + SEM_emotionalCntrl_mean), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "TalkerGroup", y = "emotionalCntrl") +
theme_minimal() + coord_cartesian(ylim = c(0, 20))
bar_plot4 <- ggplot(summary_brief_young, aes(x = talkergroup_final, y = workingMemory_mean)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = workingMemory_mean - SEM_workingMemory_mean, ymax = workingMemory_mean + SEM_workingMemory_mean), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "TalkerGroup", y = "workingMemory") +
theme_minimal() + coord_cartesian(ylim = c(0, 25))
bar_plot5 <- ggplot(summary_brief_young, aes(x = talkergroup_final, y = planOrganize_mean)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = planOrganize_mean - SEM_planOrganize_mean, ymax = planOrganize_mean + SEM_planOrganize_mean), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "TalkerGroup", y = "planOrganize") +
theme_minimal() + coord_cartesian(ylim = c(0, 20))
bar_plot6 <- ggplot(summary_brief_young, aes(x = talkergroup_final, y = BehavioralRegulationIndex_BRI_mean)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = BehavioralRegulationIndex_BRI_mean - SEM_BehavioralRegulationIndex_BRI_mean, ymax = BehavioralRegulationIndex_BRI_mean + SEM_BehavioralRegulationIndex_BRI_mean), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "TalkerGroup", y = "BehavioralRegulationIndex") +
theme_minimal() + coord_cartesian(ylim = c(0, 60))
bar_plot7 <- ggplot(summary_brief_young, aes(x = talkergroup_final, y = MetacognitionIndex_MI_mean)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = MetacognitionIndex_MI_mean - SEM_MetacognitionIndex_MI_mean, ymax = MetacognitionIndex_MI_mean + SEM_MetacognitionIndex_MI_mean), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "TalkerGroup", y = "MetacognitionIndex") +
theme_minimal() + coord_cartesian(ylim = c(0, 70))
bar_plot8 <- ggplot(summary_brief_young, aes(x = talkergroup_final, y = GlobalExecutiveComposite_GEC_mean)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = GlobalExecutiveComposite_GEC_mean - SEM_GlobalExecutiveComposite_GEC_mean, ymax = GlobalExecutiveComposite_GEC_mean + SEM_GlobalExecutiveComposite_GEC_mean), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "TalkerGroup", y = "GlobalExecutiveComposite") +
theme_minimal() + coord_cartesian(ylim = c(0, 120))
# arrange the plots in a grid
grid.arrange(bar_plot1, bar_plot2, bar_plot3, bar_plot4, bar_plot5, bar_plot6, bar_plot7, bar_plot8, ncol = 4, nrow = 2)
bar_plot1 <- ggplot(summary_brief_old, aes(x = talkergroup_final, y = inhibit_mean)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = inhibit_mean - SEM_inhibit_mean, ymax = inhibit_mean + SEM_inhibit_mean), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "TalkerGroup", y = "Inhibit") +
theme_minimal() + coord_cartesian(ylim = c(0, 30))
bar_plot2 <- ggplot(summary_brief_old, aes(x = talkergroup_final, y = shift_mean)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = shift_mean - SEM_shift_mean, ymax = shift_mean + SEM_shift_mean), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "TalkerGroup", y = "shift") +
theme_minimal() + coord_cartesian(ylim = c(0, 15))
bar_plot3 <- ggplot(summary_brief_old, aes(x = talkergroup_final, y = emotionalCntrl_mean)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = emotionalCntrl_mean - SEM_emotionalCntrl_mean, ymax = emotionalCntrl_mean + SEM_emotionalCntrl_mean), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "TalkerGroup", y = "emotionalCntrl") +
theme_minimal() + coord_cartesian(ylim = c(0, 20))
bar_plot4 <- ggplot(summary_brief_old, aes(x = talkergroup_final, y = workingMemory_mean)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = workingMemory_mean - SEM_workingMemory_mean, ymax = workingMemory_mean + SEM_workingMemory_mean), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "TalkerGroup", y = "workingMemory") +
theme_minimal() + coord_cartesian(ylim = c(0, 25))
bar_plot5 <- ggplot(summary_brief_old, aes(x = talkergroup_final, y = planOrganize_mean)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = planOrganize_mean - SEM_planOrganize_mean, ymax = planOrganize_mean + SEM_planOrganize_mean), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "TalkerGroup", y = "planOrganize") +
theme_minimal() + coord_cartesian(ylim = c(0, 20))
bar_plot6 <- ggplot(summary_brief_old, aes(x = talkergroup_final, y = BehavioralRegulationIndex_BRI_mean)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = BehavioralRegulationIndex_BRI_mean - SEM_BehavioralRegulationIndex_BRI_mean, ymax = BehavioralRegulationIndex_BRI_mean + SEM_BehavioralRegulationIndex_BRI_mean), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "TalkerGroup", y = "BehavioralRegulationIndex") +
theme_minimal() + coord_cartesian(ylim = c(0, 60))
bar_plot7 <- ggplot(summary_brief_old, aes(x = talkergroup_final, y = MetacognitionIndex_MI_mean)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = MetacognitionIndex_MI_mean - SEM_MetacognitionIndex_MI_mean, ymax = MetacognitionIndex_MI_mean + SEM_MetacognitionIndex_MI_mean), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "TalkerGroup", y = "MetacognitionIndex") +
theme_minimal() + coord_cartesian(ylim = c(0, 70))
bar_plot8 <- ggplot(summary_brief_old, aes(x = talkergroup_final, y = GlobalExecutiveComposite_GEC_mean)) +
geom_bar(stat = "identity", position = "dodge", width = 0.5, fill ='royalblue2') +
geom_errorbar(aes(ymin = GlobalExecutiveComposite_GEC_mean - SEM_GlobalExecutiveComposite_GEC_mean, ymax = GlobalExecutiveComposite_GEC_mean + SEM_GlobalExecutiveComposite_GEC_mean), width = 0.2, position = position_dodge(width = 0.7)) +
labs(x = "TalkerGroup", y = "GlobalExecutiveComposite") +
theme_minimal() + coord_cartesian(ylim = c(0, 120))
# arrange the plots in a grid
grid.arrange(bar_plot1, bar_plot2, bar_plot3, bar_plot4, bar_plot5, bar_plot6, bar_plot7, bar_plot8, ncol = 4, nrow = 2)
# ERP MEASURES:
# MAIN MODEL: only effect of condition
model1 <- lmer(MeanAmp_P3 ~ talkergroup_final*Emotion*Condition + calculator_gender_cve + calculator_age_cve +
(1|Subject), data = ZOO_FCz_Pz, REML = TRUE)
anova(model1)
## Type III Analysis of Variance Table with Satterthwaite's method
## Sum Sq Mean Sq NumDF DenDF F value
## talkergroup_final 24.019 24.019 1 75 2.0383
## Emotion 1.483 1.483 1 231 0.1258
## Condition 131.826 131.826 1 231 11.1872
## calculator_gender_cve 0.183 0.183 1 75 0.0155
## calculator_age_cve 2.443 2.443 1 75 0.2073
## talkergroup_final:Emotion 4.736 4.736 1 231 0.4019
## talkergroup_final:Condition 0.187 0.187 1 231 0.0158
## Emotion:Condition 0.155 0.155 1 231 0.0131
## talkergroup_final:Emotion:Condition 1.912 1.912 1 231 0.1623
## Pr(>F)
## talkergroup_final 0.157531
## Emotion 0.723101
## Condition 0.000961 ***
## calculator_gender_cve 0.901109
## calculator_age_cve 0.650169
## talkergroup_final:Emotion 0.526745
## talkergroup_final:Condition 0.899958
## Emotion:Condition 0.908861
## talkergroup_final:Emotion:Condition 0.687446
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
plot_model(model1, type="pred", terms = c("Emotion" , "Condition", "talkergroup_final"))
# GLOBAL MODEL: NOT MUCH CHANGES
model2 <- lmer(MeanAmp_P3 ~ talkergroup_final*Emotion*Condition + calculator_gender_cve + calculator_age_cve + shift + inhibit +
workingMemory + planOrganize + FlexibilityIndex_FI + InhibitorySelfControlIndex_ISCI + GlobalExecutiveComposite_GEC +
disfluency_sldper100words + emotionalCntrl + BehavioralRegulationIndex_BRI + MetacognitionIndex_MI + premature_responses +
RT_proper + sensitivity + (1|Subject), data = ZOO_FCz_Pz, REML = TRUE)
## fixed-effect model matrix is rank deficient so dropping 5 columns / coefficients
anova(model2)
## Type III Analysis of Variance Table with Satterthwaite's method
## Sum Sq Mean Sq NumDF DenDF F value
## talkergroup_final 9.102 9.102 1 50.685 0.7176
## Emotion 6.227 6.227 1 164.944 0.4909
## Condition 93.636 93.636 1 169.742 7.3820
## calculator_gender_cve 9.572 9.572 1 47.989 0.7546
## calculator_age_cve 96.029 96.029 1 62.491 7.5707
## shift 0.145 0.145 1 47.213 0.0114
## inhibit 0.732 0.732 1 47.592 0.0577
## workingMemory 30.427 30.427 1 49.567 2.3988
## planOrganize 20.329 20.329 1 48.863 1.6027
## FlexibilityIndex_FI 5.522 5.522 1 47.551 0.4353
## disfluency_sldper100words 1.548 1.548 1 47.090 0.1220
## premature_responses 3.483 3.483 1 194.013 0.2746
## RT_proper 15.114 15.114 1 205.916 1.1916
## sensitivity 6.182 6.182 1 196.431 0.4874
## talkergroup_final:Emotion 0.002 0.002 1 165.071 0.0002
## talkergroup_final:Condition 10.250 10.250 1 165.855 0.8081
## Emotion:Condition 0.545 0.545 1 165.156 0.0430
## talkergroup_final:Emotion:Condition 6.965 6.965 1 165.026 0.5491
## InhibitorySelfControlIndex_ISCI
## GlobalExecutiveComposite_GEC
## emotionalCntrl
## BehavioralRegulationIndex_BRI
## MetacognitionIndex_MI
## Pr(>F)
## talkergroup_final 0.400916
## Emotion 0.484493
## Condition 0.007271 **
## calculator_gender_cve 0.389333
## calculator_age_cve 0.007753 **
## shift 0.915358
## inhibit 0.811153
## workingMemory 0.127790
## planOrganize 0.211527
## FlexibilityIndex_FI 0.512569
## disfluency_sldper100words 0.728397
## premature_responses 0.600857
## RT_proper 0.276288
## sensitivity 0.485916
## talkergroup_final:Emotion 0.989215
## talkergroup_final:Condition 0.369996
## Emotion:Condition 0.836046
## talkergroup_final:Emotion:Condition 0.459726
## InhibitorySelfControlIndex_ISCI
## GlobalExecutiveComposite_GEC
## emotionalCntrl
## BehavioralRegulationIndex_BRI
## MetacognitionIndex_MI
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# MODEL 2 IS PREFERRED OVER MODEL 1:
# Calculate AIC for model1
aic_model1 <- AIC(model1)
# Calculate AIC for model2
aic_model2 <- AIC(model2)
# Compare AIC values
if (aic_model1 < aic_model2) {
cat("Model 1 is preferred (lower AIC)\n")
} else if (aic_model2 < aic_model1) {
cat("Model 2 is preferred (lower AIC)\n")
} else {
cat("Both models have similar AIC\n")
}
## Model 2 is preferred (lower AIC)
# Linearity and Homoscedasticity:
# If the relationship is linear, there should be no pattern to the residuals. The spread of the residuals should be constant across all levels of the fitted values.
# create residuals vs fitted values plot
plot(resid(model1) ~ fitted(model1), ylab="Residuals", xlab="Fitted values")
# add a regression line
abline(lm(resid(model1) ~ fitted(model1)), col="red")
#The Breusch-Pagan test or the White test can be used to statistically test for constant variance of the residuals (homoscedasticity)
library(lmtest)
bptest(residuals(model1) ~ fitted(model1))
##
## studentized Breusch-Pagan test
##
## data: residuals(model1) ~ fitted(model1)
## BP = 11.306, df = 1, p-value = 0.0007727
# Normality: The points should fall along a straight line.
qqnorm(resid(model1))
qqline(resid(model1))
# Statistical test for normality, such as the Shapiro-Wilk test
shapiro.test(resid(model1))
##
## Shapiro-Wilk normality test
##
## data: resid(model1)
## W = 0.9957, p-value = 0.5381
# No multicollinearity: You can check this assumption by calculating the variance inflation factors (VIF) for the predictors.
# A VIF greater than 5 or 10 indicates high multicollinearity.
library(car)
vif(model1)
## talkergroup_final Emotion
## 1.603888 3.761905
## Condition calculator_gender_cve
## 3.761905 1.002322
## calculator_age_cve talkergroup_final:Emotion
## 1.001335 4.162180
## talkergroup_final:Condition Emotion:Condition
## 4.162180 5.642857
## talkergroup_final:Emotion:Condition
## 5.842995
# PLOT MODEL as if the three-way interaction exists
plot_model(model1, type="pred", terms = c("Emotion" , "Condition", "talkergroup_final"))
# Calculate marginal means for NONEXISTENT the three-way interaction
means <- as.data.frame(emmeans(model1, specs = ~ talkergroup_final:Emotion:Condition))
# Rename the levels of the "talkergroup_final" factor
means$talkergroup_final <- factor(means$talkergroup_final, levels = c(0, 1), labels = c("CWNS", "CWS"))
means$Emotion <- factor(means$Emotion, levels = c("Neg", "Neut"), labels = c("Affective", "Neutral"))
# Plot the predicted means
bar_plot <- ggplot(means, aes(x = Emotion, y = emmean, fill = Condition)) +
geom_bar(stat = "identity", position = "dodge") +
geom_errorbar(aes(ymin = emmean - SE, ymax = emmean + SE), width = 0.2, position = position_dodge(0.9)) +
labs(x = "Emotion", y = "Mean P3 in μV") +
facet_grid(. ~ talkergroup_final) +
theme_minimal() +
theme(
axis.text = element_text(size = 14, color = "black"),
axis.title = element_text(size = 14),
legend.title =element_blank(),
legend.text = element_text(size = 14),
plot.title = element_text(hjust = 0.5, size = 14, face = "bold", vjust = 2),
strip.text = element_text(size = 14, face = "bold")) +
scale_fill_manual(values = c("royalblue2", "orangered", "royalblue2", "orangered", "royalblue2", "orangered", "royalblue2", "orangered"))
bar_plot <- bar_plot + coord_cartesian(ylim = c(4, 12))
print(bar_plot)
bar_plot <- ggplot(means, aes(x = Condition, y = emmean, fill = Emotion)) +
geom_bar(stat = "identity", position = "dodge") +
geom_errorbar(aes(ymin = emmean - SE, ymax = emmean + SE), width = 0.2, position = position_dodge(0.9)) +
labs(x = "Condition", y = "Mean P3 in μV") +
facet_grid(. ~ talkergroup_final) +
theme_minimal() +
theme(
axis.text = element_text(size = 14, color = "black"),
axis.title = element_text(size = 14),
legend.title =element_blank(),
legend.text = element_text(size = 14),
plot.title = element_text(hjust = 0.5, size = 14, face = "bold", vjust = 2),
strip.text = element_text(size = 14, face = "bold")) +
scale_fill_manual(values = c("springgreen3", "royalblue2", "springgreen3", "royalblue2", "springgreen3", "royalblue2", "springgreen3", "royalblue2"))
bar_plot <- bar_plot + coord_cartesian(ylim = c(4, 12))
print(bar_plot)
# LATERALITY - LATERALITY TALKERGROUP INTERACTION
model1 <- lmer(MeanAmp_P3 ~ talkergroup_final*Emotion*Condition*Laterality + calculator_gender_cve + calculator_age_cve +
(1|Subject), data = ZOO_good, REML = TRUE)
anova(model1)
## Type III Analysis of Variance Table with Satterthwaite's method
## Sum Sq Mean Sq NumDF DenDF
## talkergroup_final 3.69 3.69 1 75
## Emotion 9.92 9.92 1 847
## Condition 75.97 75.97 1 847
## Laterality 1932.65 966.33 2 847
## calculator_gender_cve 3.36 3.36 1 75
## calculator_age_cve 1.57 1.57 1 75
## talkergroup_final:Emotion 11.64 11.64 1 847
## talkergroup_final:Condition 2.34 2.34 1 847
## Emotion:Condition 1.42 1.42 1 847
## talkergroup_final:Laterality 103.33 51.66 2 847
## Emotion:Laterality 10.48 5.24 2 847
## Condition:Laterality 71.01 35.50 2 847
## talkergroup_final:Emotion:Condition 29.53 29.53 1 847
## talkergroup_final:Emotion:Laterality 1.96 0.98 2 847
## talkergroup_final:Condition:Laterality 56.14 28.07 2 847
## Emotion:Condition:Laterality 0.15 0.08 2 847
## talkergroup_final:Emotion:Condition:Laterality 4.78 2.39 2 847
## F value Pr(>F)
## talkergroup_final 0.2325 0.63107
## Emotion 0.6258 0.42913
## Condition 4.7923 0.02886 *
## Laterality 60.9583 < 2e-16 ***
## calculator_gender_cve 0.2122 0.64637
## calculator_age_cve 0.0988 0.75412
## talkergroup_final:Emotion 0.7346 0.39164
## talkergroup_final:Condition 0.1475 0.70101
## Emotion:Condition 0.0894 0.76504
## talkergroup_final:Laterality 3.2591 0.03890 *
## Emotion:Laterality 0.3305 0.71864
## Condition:Laterality 2.2396 0.10713
## talkergroup_final:Emotion:Condition 1.8626 0.17269
## talkergroup_final:Emotion:Laterality 0.0619 0.93994
## talkergroup_final:Condition:Laterality 1.7709 0.17082
## Emotion:Condition:Laterality 0.0047 0.99527
## talkergroup_final:Emotion:Condition:Laterality 0.1509 0.85998
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
plot_model(model1, type="pred", terms = c("Laterality", "talkergroup_final"))
# USED THIS FOR THE POSTER VKC - but not appropriate due to repeated nature of the design
# model1 <- lm (accuracy ~ talkergroup_final + MeanAmp_N2P2 + MeanAmp_P3 +MeanAmp_P2 + Latency_N2 + Latency_P3 + Latency_P2 + calculator_age_cve+calculator_gender_cve, data = ZOO_FCz_Pz)
# Given the repeated nature of your study (each subject having 4 accuracy scores), we should consider using a mixed-effects model to account for the potential correlation within the repeated measures:
# Random intercept for each subject
model1 <- lmer(accuracy ~ talkergroup_final*Emotion*Condition + MeanAmp_N2P2 + MeanAmp_P3 + MeanAmp_P2 + Latency_N2 + Latency_P3 +
Latency_P2 + calculator_age_cve + calculator_gender_cve +
(1 | Subject), data = ZOO_FCz_Pz)
anova(model1)
## Type III Analysis of Variance Table with Satterthwaite's method
## Sum Sq Mean Sq NumDF DenDF F value
## talkergroup_final 258.7 258.7 1 75.546 2.6965
## Emotion 55.0 55.0 1 230.202 0.5729
## Condition 7742.7 7742.7 1 265.572 80.7001
## MeanAmp_N2P2 42.5 42.5 1 252.181 0.4429
## MeanAmp_P3 366.0 366.0 1 248.478 3.8152
## MeanAmp_P2 364.4 364.4 1 269.840 3.7979
## Latency_N2 102.9 102.9 1 297.763 1.0730
## Latency_P3 202.7 202.7 1 299.481 2.1123
## Latency_P2 2.4 2.4 1 298.602 0.0246
## calculator_age_cve 725.0 725.0 1 80.707 7.5567
## calculator_gender_cve 77.7 77.7 1 74.293 0.8100
## talkergroup_final:Emotion 0.1 0.1 1 227.716 0.0010
## talkergroup_final:Condition 139.5 139.5 1 227.779 1.4537
## Emotion:Condition 19.4 19.4 1 228.418 0.2027
## talkergroup_final:Emotion:Condition 39.8 39.8 1 232.321 0.4144
## Pr(>F)
## talkergroup_final 0.104722
## Emotion 0.449872
## Condition < 2.2e-16 ***
## MeanAmp_N2P2 0.506358
## MeanAmp_P3 0.051912 .
## MeanAmp_P2 0.052352 .
## Latency_N2 0.301120
## Latency_P3 0.147163
## Latency_P2 0.875562
## calculator_age_cve 0.007375 **
## calculator_gender_cve 0.371021
## talkergroup_final:Emotion 0.975426
## talkergroup_final:Condition 0.229187
## Emotion:Condition 0.652970
## talkergroup_final:Emotion:Condition 0.520391
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Random slope for talkergroup_final and a random intercept for each subject
model2 <- lmer(accuracy ~ talkergroup_final + MeanAmp_N2P2 + MeanAmp_P3 + MeanAmp_P2 +
Latency_N2 + Latency_P3 + Latency_P2 + calculator_age_cve + calculator_gender_cve +
(1 + talkergroup_final | Subject), data = ZOO_FCz_Pz)
anova(model2)
## Type III Analysis of Variance Table with Satterthwaite's method
## Sum Sq Mean Sq NumDF DenDF F value Pr(>F)
## talkergroup_final 746.06 746.06 1 60.505 5.0015 0.0290232 *
## MeanAmp_N2P2 638.64 638.64 1 229.118 4.2814 0.0396537 *
## MeanAmp_P3 14.46 14.46 1 223.769 0.0970 0.7558062
## MeanAmp_P2 526.73 526.73 1 245.382 3.5311 0.0614117 .
## Latency_N2 1860.98 1860.98 1 296.160 12.4759 0.0004780 ***
## Latency_P3 1968.88 1968.88 1 280.966 13.1992 0.0003329 ***
## Latency_P2 168.13 168.13 1 283.785 1.1271 0.2892957
## calculator_age_cve 438.41 438.41 1 61.754 2.9391 0.0914743 .
## calculator_gender_cve 2.39 2.39 1 56.866 0.0160 0.8998060
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Much simpler model
# USE THIS BECAUSE WE DO NOT ANTICIPATE that talkergroup_final:Emotion:Condition interaction contributes to the variability in accuracy scores?
model3 <- lmer(accuracy ~ talkergroup_final + MeanAmp_N2P2 + MeanAmp_P3 + MeanAmp_P2 + Latency_N2 + Latency_P3 +
Latency_P2 + calculator_age_cve + calculator_gender_cve +
(1 | Subject), data = ZOO_FCz_Pz)
anova(model3)
## Type III Analysis of Variance Table with Satterthwaite's method
## Sum Sq Mean Sq NumDF DenDF F value Pr(>F)
## talkergroup_final 693.61 693.61 1 63.712 4.6187 0.0354302 *
## MeanAmp_N2P2 492.09 492.09 1 226.373 3.2768 0.0715932 .
## MeanAmp_P3 2.31 2.31 1 227.797 0.0154 0.9014270
## MeanAmp_P2 423.38 423.38 1 266.449 2.8192 0.0943129 .
## Latency_N2 1694.73 1694.73 1 305.974 11.2850 0.0008803 ***
## Latency_P3 2212.51 2212.51 1 304.755 14.7329 0.0001506 ***
## Latency_P2 252.97 252.97 1 305.392 1.6845 0.1953093
## calculator_age_cve 332.42 332.42 1 68.262 2.2135 0.1414087
## calculator_gender_cve 119.49 119.49 1 62.674 0.7956 0.3758091
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
AIC(model1)
## [1] 2430.813
AIC(model2)
## [1] 2563.527
AIC(model3)
## [1] 2563.433
# FEATURE ENGINEER Go-NoGo difference score:
ZOO_FCz_Pz_DIFF <- ZOO_FCz_Pz %>%
group_by(Subject, Emotion) %>%
summarise(
disfluency = disfluency_sldper100words_final[Condition == "Go"],
talkergroup_final = talkergroup_final[Condition == "Go"],
calculator_age_cve = calculator_age_cve[Condition == "Go"],
calculator_gender_cve = calculator_gender_cve[Condition == "Go"],
MeanAmp_N2P2_DIFF = MeanAmp_N2P2[Condition == "NoGo"] - MeanAmp_N2P2[Condition == "Go"],
MeanAmp_P3_DIFF = MeanAmp_P3[Condition == "NoGo"] - MeanAmp_P3[Condition == "Go"],
Latency_N2_DIFF = Latency_N2[Condition == "NoGo"] - Latency_N2[Condition == "Go"],
Latency_P2_DIFF = Latency_P2[Condition == "NoGo"] - Latency_P3[Condition == "Go"],
Latency_P3_DIFF = Latency_P3[Condition == "NoGo"] - Latency_P3[Condition == "Go"],
premature_responses_DIFF = premature_responses[Condition == "NoGo"] - premature_responses[Condition == "Go"],
RT_proper_DIFF = RT_proper[Condition == "NoGo"] - RT_proper[Condition == "Go"],
sensitivity = sensitivity[Condition == "NoGo"],
inhibit = inhibit[Condition == "Go"],
shift = shift[Condition == "Go"],
emotionalCntrl = emotionalCntrl[Condition == "Go"],
workingMemory = workingMemory[Condition == "Go"],
BehavioralRegulationIndex_BRI = BehavioralRegulationIndex_BRI[Condition == "Go"],
MetacognitionIndex_MI = MetacognitionIndex_MI[Condition == "Go"],
GlobalExecutiveComposite_GEC = GlobalExecutiveComposite_GEC[Condition == "Go"]
)
## `summarise()` has grouped output by 'Subject'. You can override using the
## `.groups` argument.
# Sensitivity predicted by talkergroup, age, MetacognitionIndex_MI
model1 <- lmer(sensitivity ~ talkergroup_final*Emotion + MeanAmp_N2P2_DIFF + MeanAmp_P3_DIFF + Latency_N2_DIFF + Latency_P2_DIFF +
Latency_P3_DIFF + calculator_age_cve+calculator_gender_cve + inhibit +
shift + emotionalCntrl + workingMemory + BehavioralRegulationIndex_BRI + MetacognitionIndex_MI + GlobalExecutiveComposite_GEC + (1|Subject), data = ZOO_FCz_Pz_DIFF)
## fixed-effect model matrix is rank deficient so dropping 2 columns / coefficients
anova(model1)
## Type III Analysis of Variance Table with Satterthwaite's method
## Sum Sq Mean Sq NumDF DenDF F value Pr(>F)
## talkergroup_final 0.9926 0.9926 1 69.541 1.1940 0.27829
## Emotion 0.0078 0.0078 1 71.718 0.0094 0.92307
## MeanAmp_N2P2_DIFF 0.2972 0.2972 1 134.979 0.3575 0.55088
## MeanAmp_P3_DIFF 1.2603 1.2603 1 121.879 1.5160 0.22059
## Latency_N2_DIFF 0.4894 0.4894 1 117.152 0.5887 0.44447
## Latency_P2_DIFF 0.0684 0.0684 1 130.021 0.0823 0.77467
## Latency_P3_DIFF 0.1803 0.1803 1 129.220 0.2169 0.64219
## calculator_age_cve 15.1751 15.1751 1 71.074 18.2542 5.897e-05
## calculator_gender_cve 0.6035 0.6035 1 68.096 0.7260 0.39717
## inhibit 0.0961 0.0961 1 66.078 0.1156 0.73490
## shift 0.0810 0.0810 1 66.207 0.0974 0.75594
## emotionalCntrl 1.1117 1.1117 1 67.236 1.3373 0.25160
## workingMemory 0.0002 0.0002 1 67.369 0.0003 0.98703
## MetacognitionIndex_MI 4.7002 4.7002 1 71.115 5.6539 0.02011
## talkergroup_final:Emotion 0.0890 0.0890 1 73.932 0.1070 0.74450
## BehavioralRegulationIndex_BRI
## GlobalExecutiveComposite_GEC
##
## talkergroup_final
## Emotion
## MeanAmp_N2P2_DIFF
## MeanAmp_P3_DIFF
## Latency_N2_DIFF
## Latency_P2_DIFF
## Latency_P3_DIFF
## calculator_age_cve ***
## calculator_gender_cve
## inhibit
## shift
## emotionalCntrl
## workingMemory
## MetacognitionIndex_MI *
## talkergroup_final:Emotion
## BehavioralRegulationIndex_BRI
## GlobalExecutiveComposite_GEC
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Sensitivity predicted by only talkergroup and age
model1 <- lmer(sensitivity ~ talkergroup_final*Emotion + MeanAmp_N2P2_DIFF + MeanAmp_P3_DIFF + Latency_N2_DIFF + Latency_P2_DIFF +
Latency_P3_DIFF + calculator_age_cve+calculator_gender_cve + (1|Subject), data = ZOO_FCz_Pz_DIFF)
anova(model1)
## Type III Analysis of Variance Table with Satterthwaite's method
## Sum Sq Mean Sq NumDF DenDF F value Pr(>F)
## talkergroup_final 3.6121 3.6121 1 73.034 4.2282 0.04333 *
## Emotion 0.0015 0.0015 1 72.912 0.0017 0.96720
## MeanAmp_N2P2_DIFF 0.2720 0.2720 1 144.165 0.3184 0.57343
## MeanAmp_P3_DIFF 0.4266 0.4266 1 127.366 0.4994 0.48105
## Latency_N2_DIFF 0.2376 0.2376 1 124.331 0.2781 0.59886
## Latency_P2_DIFF 0.0088 0.0088 1 139.702 0.0103 0.91918
## Latency_P3_DIFF 0.2237 0.2237 1 136.688 0.2618 0.60970
## calculator_age_cve 12.6732 12.6732 1 72.872 14.8350 0.00025 ***
## calculator_gender_cve 0.5481 0.5481 1 73.941 0.6416 0.42569
## talkergroup_final:Emotion 0.3668 0.3668 1 75.443 0.4293 0.51431
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#MODERATOR ANALYSIS WITH ALL CORTICAL MEASURES:
# Select columns from ZOO_FCz_Pz
selected_df <- dplyr::select(ZOO_FCz_Pz, Subject, talkergroup_final, accuracy, calculator_age_cve, calculator_gender_cve, Condition, Emotion, MeanAmp_N2P2, MeanAmp_N2, MeanAmp_P2, MeanAmp_P3, Latency_N2, Latency_P2, Latency_P3)
# Converting Condition and Emotion into numerical variables:
selected_df$Condition <- as.integer(selected_df$Condition == "Go") # Go =1
selected_df$Emotion <- as.integer(selected_df$Emotion == "Neg") # Neg = 1
# Scaling the variables
# Specify the columns you want to scale
columns_to_scale <- c("MeanAmp_N2P2", "MeanAmp_N2", "MeanAmp_P2","MeanAmp_P3", "Latency_N2","Latency_P2", "Latency_P3", "calculator_age_cve")
# Subset the data frame to include only the columns you want to scale
selected_df_to_scale <- selected_df[, columns_to_scale]
# Scale the selected columns
selected_df_scaled <- as.data.frame(scale(selected_df_to_scale))
# Get the Unscaled columns + Subjects Column
# Because accuracy is the dependent measure no need to scale it!
selected_df_unscaled <- subset(selected_df, select = c( Subject, talkergroup_final, accuracy, calculator_gender_cve, Condition, Emotion))
# Combine the scaled subset with the original data frame
final_merged <- cbind(selected_df_scaled, selected_df_unscaled)
# MODEL 1
model1 <- lmer(accuracy ~ talkergroup_final*Condition*Emotion + MeanAmp_N2P2 + MeanAmp_P3 +MeanAmp_P2 + Latency_N2 + Latency_P3 +
Latency_P2 + calculator_age_cve + (1|Subject), data = final_merged)
anova(model1)
## Type III Analysis of Variance Table with Satterthwaite's method
## Sum Sq Mean Sq NumDF DenDF F value
## talkergroup_final 358.0 358.0 1 249.174 3.7322
## Condition 4132.8 4132.8 1 249.491 43.0796
## Emotion 4.8 4.8 1 228.837 0.0502
## MeanAmp_N2P2 41.7 41.7 1 254.180 0.4346
## MeanAmp_P3 367.9 367.9 1 250.511 3.8352
## MeanAmp_P2 377.2 377.2 1 271.324 3.9315
## Latency_N2 93.1 93.1 1 298.794 0.9706
## Latency_P3 211.9 211.9 1 300.404 2.2085
## Latency_P2 4.1 4.1 1 299.603 0.0431
## calculator_age_cve 717.9 717.9 1 81.798 7.4836
## talkergroup_final:Condition 162.2 162.2 1 230.312 1.6911
## talkergroup_final:Emotion 22.0 22.0 1 230.006 0.2291
## Condition:Emotion 19.8 19.8 1 228.470 0.2065
## talkergroup_final:Condition:Emotion 39.7 39.7 1 232.369 0.4143
## Pr(>F)
## talkergroup_final 0.054506 .
## Condition 3.021e-10 ***
## Emotion 0.822950
## MeanAmp_N2P2 0.510354
## MeanAmp_P3 0.051298 .
## MeanAmp_P2 0.048397 *
## Latency_N2 0.325330
## Latency_P3 0.138301
## Latency_P2 0.835638
## calculator_age_cve 0.007634 **
## talkergroup_final:Condition 0.194756
## talkergroup_final:Emotion 0.632651
## Condition:Emotion 0.649949
## talkergroup_final:Condition:Emotion 0.520431
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
AIC(model1)
## [1] 2394.798
# MODEL 2
model2 <- lmer(accuracy ~ talkergroup_final + MeanAmp_N2P2 + MeanAmp_P3 +MeanAmp_P2 + Latency_N2 + Latency_P3 +
Latency_P2 + calculator_age_cve + (1|Subject), data = final_merged)
anova(model2)
## Type III Analysis of Variance Table with Satterthwaite's method
## Sum Sq Mean Sq NumDF DenDF F value Pr(>F)
## talkergroup_final 723.70 723.70 1 64.706 4.8172 0.0317710 *
## MeanAmp_N2P2 488.12 488.12 1 228.376 3.2491 0.0727800 .
## MeanAmp_P3 2.07 2.07 1 229.797 0.0138 0.9067236
## MeanAmp_P2 405.16 405.16 1 267.765 2.6969 0.1017153
## Latency_N2 1646.65 1646.65 1 306.985 10.9609 0.0010415 **
## Latency_P3 2259.70 2259.70 1 305.837 15.0416 0.0001288 ***
## Latency_P2 276.67 276.67 1 306.344 1.8416 0.1757574
## calculator_age_cve 325.94 325.94 1 69.278 2.1696 0.1452899
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
plot1 <- plot_model(model1, type='pred', terms = c("MeanAmp_P3", "Emotion", "talkergroup_final")) + aes(linetype = group, color = group) +
theme_bw() +
labs(x = "Mean P3 Amplitude (Scaled)", y = "Accuracy (%)", fill = "talkergroup_final")+
theme(axis.title.x = element_text(family="Arial", color = "grey20", size = 14, angle = 0),
axis.title.y = element_text(family="Arial", color = "grey20", size = 14, angle = 90),
axis.text.x = element_text(family="Arial", color = "grey20", size = 12, angle = 0),
axis.text.y = element_text(family="Arial", color = "grey20", size = 12, angle = 0),
legend.text = element_text(family="Arial", color = "grey20", size = 12, angle = 0),
title = element_blank(),
legend.title = element_blank())+
scale_color_manual(values = c("mediumpurple1", "springgreen3"), labels = c("Neutral", "Affective")) +
scale_fill_manual(values = c("mediumpurple1", "springgreen3"), labels = c("Neutral", "Affective"))
## Scale for colour is already present.
## Adding another scale for colour, which will replace the existing scale.
print(plot1)
#FIT MULTIPLE LINEAR MODELS FOR EACH TALKERGROUP AND EMOTION
# Create an empty data frame to store the results
results_df <- data.frame(
Emotion = character(),
TalkerGroup = character(),
P_Value = numeric(),
stringsAsFactors = FALSE
)
# Create a subset of your data for each combination of emotion and talkergroup
subsets <- final_merged %>%
filter(Emotion %in% c(0,1), talkergroup_final %in% c(0, 1))
# Loop through each combination and extract the p-values
for (emotion in unique(subsets$Emotion)) {
for (group in unique(subsets$talkergroup_final)) {
subset_data <- subsets %>%
filter(Emotion == emotion, talkergroup_final == group)
# Fit a linear regression model for the current combination
model <- lm(accuracy ~ MeanAmp_P3, data = subset_data)
# Extract the p-value
p_value <- summary(model)$coefficients[2, 4] # Assumes MeanAmp_P3 is the second coefficient
# Create a data frame with the results
result_row <- data.frame(
Emotion = emotion,
TalkerGroup = paste("talkergroup", group),
P_Value = p_value
)
# Append the results to the results data frame
results_df <- rbind(results_df, result_row)
}
}
# Print the results
print(results_df)
## Emotion TalkerGroup P_Value
## 1 1 talkergroup 1 0.5245270
## 2 1 talkergroup 0 0.0993229
## 3 0 talkergroup 1 0.5895161
## 4 0 talkergroup 0 0.9969852
ZOO_Corr_DIFF_Neg_CWS <- subset(ZOO_FCz_Pz_DIFF, Emotion == "Neg" & talkergroup_final == 1)
ZOO_Corr_DIFF_Neg_CWNS <- subset(ZOO_FCz_Pz_DIFF, Emotion == "Neg" & talkergroup_final == 0)
ZOO_Corr_DIFF_Neut_CWS <- subset(ZOO_FCz_Pz_DIFF, Emotion == "Neut" & talkergroup_final == 1)
ZOO_Corr_DIFF_Neut_CWNS <- subset(ZOO_FCz_Pz_DIFF, Emotion == "Neut" & talkergroup_final == 0)
ZOO_Corr_NoGo_Neg_CWS <- subset(ZOO_FCz_Pz, Condition == "NoGo" & Emotion == "Neg" & talkergroup_final == 1)
ZOO_Corr_NoGo_Neut_CWS <- subset(ZOO_FCz_Pz, Condition == "NoGo" & Emotion == "Neut" & talkergroup_final == 1)
ZOO_Corr_Go_Neg_CWS <- subset(ZOO_FCz_Pz, Condition == "Go" & Emotion == "Neg" & talkergroup_final == 1)
ZOO_Corr_Go_Neut_CWS <- subset(ZOO_FCz_Pz, Condition == "Go" & Emotion == "Neut" & talkergroup_final == 1)
ZOO_Corr_NoGo_Neg_CWNS <- subset(ZOO_FCz_Pz, Condition == "NoGo" & Emotion == "Neg" & talkergroup_final == 0)
ZOO_Corr_NoGo_Neut_CWNS <- subset(ZOO_FCz_Pz, Condition == "NoGo" & Emotion == "Neut" & talkergroup_final == 0)
ZOO_Corr_Go_Neg_CWNS <- subset(ZOO_FCz_Pz, Condition == "Go" & Emotion == "Neg" & talkergroup_final == 0)
ZOO_Corr_Go_Neut_CWNS <- subset(ZOO_FCz_Pz, Condition == "Go" & Emotion == "Neut" & talkergroup_final == 0)
rownames(ZOO_Corr_DIFF_Neg_CWS) <- NULL
rownames(ZOO_Corr_DIFF_Neg_CWNS) <- NULL
rownames(ZOO_Corr_DIFF_Neut_CWS) <- NULL
rownames(ZOO_Corr_DIFF_Neut_CWNS) <- NULL
rownames(ZOO_Corr_NoGo_Neg_CWS) <- NULL
rownames(ZOO_Corr_NoGo_Neut_CWS) <- NULL
rownames(ZOO_Corr_Go_Neg_CWS) <- NULL
rownames(ZOO_Corr_Go_Neut_CWS) <- NULL
rownames(ZOO_Corr_NoGo_Neg_CWNS) <- NULL
rownames(ZOO_Corr_NoGo_Neut_CWNS) <- NULL
rownames(ZOO_Corr_Go_Neg_CWNS) <- NULL
rownames(ZOO_Corr_Go_Neut_CWNS) <- NULL
# Bind the datasets row-wise
ZOO_Corr_NoGo_CWS <- bind_rows(ZOO_Corr_NoGo_Neg_CWS, ZOO_Corr_NoGo_Neut_CWS)
# Group by variable and calculate the mean for each variable
ZOO_Corr_NoGo_CWS <- ZOO_Corr_NoGo_CWS %>%
group_by(Subject) %>%
summarise_all(mean)
## Warning: There were 370 warnings in `summarise()`.
## The first warning was:
## ℹ In argument: `Condition = (new("standardGeneric", .Data = function (x, ...)
## ...`.
## ℹ In group 1: `Subject = "AE050318"`.
## Caused by warning in `mean.default()`:
## ! argument is not numeric or logical: returning NA
## ℹ Run `dplyr::last_dplyr_warnings()` to see the 369 remaining warnings.
# Bind the datasets row-wise
ZOO_Corr_NoGo_CWNS <- bind_rows(ZOO_Corr_NoGo_Neg_CWNS, ZOO_Corr_NoGo_Neut_CWNS)
# Group by variable and calculate the mean for each variable
ZOO_Corr_NoGo_CWNS <- ZOO_Corr_NoGo_CWNS %>%
group_by(Subject) %>%
summarise_all(mean)
## Warning: There were 420 warnings in `summarise()`.
## The first warning was:
## ℹ In argument: `Condition = (new("standardGeneric", .Data = function (x, ...)
## ...`.
## ℹ In group 1: `Subject = "AH101121"`.
## Caused by warning in `mean.default()`:
## ! argument is not numeric or logical: returning NA
## ℹ Run `dplyr::last_dplyr_warnings()` to see the 419 remaining warnings.
#DRAW CORRELATION PLOTS###########################
correlation_analysis <- function(df1, df2, var1, var2) {
# Perform the correlation test
correlation_CWS <- cor.test(df1[[var1]], df1[[var2]], use="complete.obs") #exclude pairs with missing values
correlation_CWNS <- cor.test(df2[[var1]], df2[[var2]], use="complete.obs")
# Create a scatter plot with a regression line and confidence interval
# use `aes_string()` or aes with !!sym when you're passing variable names as strings.
title <- paste(deparse(substitute(df1)), "and", deparse(substitute(df2)))
ggplot() +
geom_point(data = df1, aes(x = !!sym(var1), y = !!sym(var2)), color = "red") +
geom_smooth(data = df1, aes(x = !!sym(var1), y = !!sym(var2)), method = "lm", se = TRUE, col = "red") +
geom_point(data = df2, aes(x = !!sym(var1), y = !!sym(var2)), color = "blue") +
geom_smooth(data = df2, aes(x = !!sym(var1), y = !!sym(var2)), method = "lm", se = TRUE, col = "blue") +
labs(title = title, x = "ERP Amplitude (µV)", y = "Accuracy (%)") +
theme_minimal() + # Set background to white
theme(panel.grid.major = element_blank(), # Remove major grid lines
panel.grid.minor = element_blank(), # Remove minor grid lines
axis.line = element_line(color = "black"),
axis.text = element_text(size = 14), # Increase font size for both axis labels and tick labels
axis.title = element_text(size = 16)) + # Increase font size for axis labels
# ylim(40, 110) + # Replace ymin_value and ymax_value with your desired limits
annotate("text", x = mean(df1[[var1]]), y = max(df1[[var2]]),
label = paste("CWS r =", round(correlation_CWS$estimate, 3)), col = "red") +
annotate("text", x = mean(df1[[var1]]), y = max(df1[[var2]]),
label = paste("p-value =", round(correlation_CWS$p.value, 3)), vjust = -2, col = "red") +
annotate("text", x = mean(df2[[var1]]), y = min(df2[[var2]]) ,
label = paste("CWNS r =", round(correlation_CWNS$estimate, 3)), col = "blue") +
annotate("text", x = mean(df2[[var1]]), y = min(df2[[var2]]),
label = paste("p-value =", round(correlation_CWNS$p.value, 3)), vjust = -2, col = "blue") +
theme(legend.position = "none")
}
correlation_analysis(ZOO_Corr_NoGo_Neg_CWS, ZOO_Corr_NoGo_Neg_CWNS, "MeanAmp_N2P2", "accuracy")
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using formula = 'y ~ x'
correlation_analysis(ZOO_Corr_NoGo_Neut_CWS, ZOO_Corr_NoGo_Neut_CWNS, "MeanAmp_N2P2", "accuracy")
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using formula = 'y ~ x'
correlation_analysis(ZOO_Corr_Go_Neg_CWS, ZOO_Corr_Go_Neg_CWNS, "MeanAmp_N2P2", "accuracy")
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using formula = 'y ~ x'
correlation_analysis(ZOO_Corr_Go_Neut_CWS, ZOO_Corr_Go_Neut_CWNS, "MeanAmp_N2P2", "accuracy")
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using formula = 'y ~ x'
correlation_analysis(ZOO_Corr_NoGo_Neg_CWS, ZOO_Corr_NoGo_Neg_CWNS, "MeanAmp_P3", "accuracy")
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using formula = 'y ~ x'
correlation_analysis(ZOO_Corr_NoGo_Neut_CWS, ZOO_Corr_NoGo_Neut_CWNS, "MeanAmp_P3", "accuracy")
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using formula = 'y ~ x'
correlation_analysis(ZOO_Corr_Go_Neg_CWS, ZOO_Corr_Go_Neg_CWNS, "MeanAmp_P3", "accuracy")
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using formula = 'y ~ x'
correlation_analysis(ZOO_Corr_Go_Neut_CWS, ZOO_Corr_Go_Neut_CWNS, "MeanAmp_P3", "accuracy")
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using formula = 'y ~ x'
#PERSONALIZED PLOT######
correlation_CWS_Neg_NOGO <- cor.test(ZOO_Corr_NoGo_Neg_CWS$MeanAmp_P3, ZOO_Corr_NoGo_Neg_CWS$accuracy)
correlation_CWNS_Neg_NOGO <- cor.test(ZOO_Corr_NoGo_Neg_CWNS$MeanAmp_P3, ZOO_Corr_NoGo_Neg_CWNS$accuracy)
# Create a scatter plot with a regression line and confidence interval
ggplot() +
geom_point(data = ZOO_Corr_NoGo_Neg_CWNS, aes(x = MeanAmp_P3, y = accuracy), color = "royalblue") +
geom_smooth(data = ZOO_Corr_NoGo_Neg_CWNS, aes(x = MeanAmp_P3, y = accuracy), method = "lm", se = TRUE, col = "royalblue") +
geom_point(data = ZOO_Corr_NoGo_Neg_CWS, aes(x = MeanAmp_P3, y = accuracy), color = "orangered") +
geom_smooth(data = ZOO_Corr_NoGo_Neg_CWS, aes(x = MeanAmp_P3, y = accuracy), method = "lm", se = TRUE, col = "orangered") +
labs(title = "CWNS in Affective GO and NOGO", x = "P3 Amplitude (µV)", y = "Accuracy (%)") +
theme_minimal() + # Set background to white
theme(panel.grid.major = element_blank(), # Remove major grid lines
panel.grid.minor = element_blank(), # Remove minor grid lines
axis.line = element_line(color = "black"),
axis.text = element_text(size = 14), # Increase font size for both axis labels and tick labels
axis.title = element_text(size = 16)) + # Increase font size for axis labels
ylim(40, 110) + # Replace ymin_value and ymax_value with your desired limits
annotate("text", x = mean(ZOO_Corr_NoGo_Neg_CWNS$MeanAmp_P3), y = max(ZOO_Corr_NoGo_Neg_CWNS$accuracy) + 5,
label = paste("CWNS Affective NOGO r =", round(correlation_CWNS_Neg_NOGO$estimate, 3)), vjust = -1, col = "royalblue") +
annotate("text", x = mean(ZOO_Corr_NoGo_Neg_CWNS$MeanAmp_P3), y = max(ZOO_Corr_NoGo_Neg_CWNS$accuracy),
label = paste("p-value =", round(correlation_CWNS_Neg_NOGO$p.value, 3)), vjust = -1, col = "royalblue") +
annotate("text", x = mean(ZOO_Corr_NoGo_Neg_CWS$MeanAmp_P3), y = min(ZOO_Corr_NoGo_Neg_CWS$accuracy) + 5,
label = paste("CWS Affective NOGO r =", round(correlation_CWS_Neg_NOGO$estimate, 3)), vjust = -1, col = "orangered") +
annotate("text", x = mean(ZOO_Corr_NoGo_Neg_CWS$MeanAmp_P3), y = min(ZOO_Corr_NoGo_Neg_CWS$accuracy),
label = paste("p-value =", round(correlation_CWS_Neg_NOGO$p.value, 3)), vjust = -1, col = "orangered") +
theme(legend.position = "none")
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using formula = 'y ~ x'
correlation_analysis(ZOO_Corr_DIFF_Neut_CWS, ZOO_Corr_DIFF_Neut_CWNS, "MeanAmp_N2P2_DIFF", "sensitivity")
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using formula = 'y ~ x'
correlation_analysis(ZOO_Corr_DIFF_Neut_CWS, ZOO_Corr_DIFF_Neut_CWNS, "MeanAmp_N2P2_DIFF", "premature_responses_DIFF")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing missing values (`geom_point()`).
## Warning: Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
correlation_analysis(ZOO_Corr_DIFF_Neut_CWS, ZOO_Corr_DIFF_Neut_CWNS, "MeanAmp_N2P2_DIFF", "RT_proper_DIFF")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 2 rows containing non-finite values (`stat_smooth()`).
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 2 rows containing missing values (`geom_point()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
correlation_analysis(ZOO_Corr_DIFF_Neut_CWS, ZOO_Corr_DIFF_Neut_CWNS, "MeanAmp_N2P2_DIFF", "inhibit")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 1 rows containing missing values (`geom_point()`).
## Removed 1 rows containing missing values (`geom_point()`).
## Warning: Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
correlation_analysis(ZOO_Corr_DIFF_Neut_CWS, ZOO_Corr_DIFF_Neut_CWNS, "MeanAmp_N2P2_DIFF", "shift")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 1 rows containing missing values (`geom_point()`).
## Removed 1 rows containing missing values (`geom_point()`).
## Warning: Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
correlation_analysis(ZOO_Corr_DIFF_Neut_CWS, ZOO_Corr_DIFF_Neut_CWNS, "MeanAmp_N2P2_DIFF", "workingMemory")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 1 rows containing missing values (`geom_point()`).
## Removed 1 rows containing missing values (`geom_point()`).
## Warning: Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
correlation_analysis(ZOO_Corr_DIFF_Neut_CWS, ZOO_Corr_DIFF_Neut_CWNS, "MeanAmp_N2P2_DIFF", "BehavioralRegulationIndex_BRI")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 1 rows containing missing values (`geom_point()`).
## Removed 1 rows containing missing values (`geom_point()`).
## Warning: Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
correlation_analysis(ZOO_Corr_DIFF_Neut_CWS, ZOO_Corr_DIFF_Neut_CWNS, "MeanAmp_N2P2_DIFF", "MetacognitionIndex_MI")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 1 rows containing missing values (`geom_point()`).
## Removed 1 rows containing missing values (`geom_point()`).
## Warning: Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
correlation_analysis(ZOO_Corr_DIFF_Neut_CWS, ZOO_Corr_DIFF_Neut_CWNS, "MeanAmp_N2P2_DIFF", "GlobalExecutiveComposite_GEC")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 1 rows containing missing values (`geom_point()`).
## Removed 1 rows containing missing values (`geom_point()`).
## Warning: Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
correlation_analysis(ZOO_Corr_Go_Neut_CWS, ZOO_Corr_Go_Neut_CWNS, "MeanAmp_N2P2", "sensitivity")
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using formula = 'y ~ x'
correlation_analysis(ZOO_Corr_Go_Neut_CWS, ZOO_Corr_Go_Neut_CWNS, "MeanAmp_N2P2", "premature_responses")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing missing values (`geom_point()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
correlation_analysis(ZOO_Corr_Go_Neut_CWS, ZOO_Corr_Go_Neut_CWNS, "MeanAmp_N2P2", "RT_proper")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing missing values (`geom_point()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
correlation_analysis(ZOO_Corr_Go_Neut_CWS, ZOO_Corr_Go_Neut_CWNS, "MeanAmp_N2P2", "inhibit")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 1 rows containing missing values (`geom_point()`).
## Removed 1 rows containing missing values (`geom_point()`).
## Warning: Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
correlation_analysis(ZOO_Corr_Go_Neut_CWS, ZOO_Corr_Go_Neut_CWNS, "MeanAmp_N2P2", "shift")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 1 rows containing missing values (`geom_point()`).
## Removed 1 rows containing missing values (`geom_point()`).
## Warning: Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
correlation_analysis(ZOO_Corr_Go_Neut_CWS, ZOO_Corr_Go_Neut_CWNS, "MeanAmp_N2P2", "emotionalCntrl")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 1 rows containing missing values (`geom_point()`).
## Removed 1 rows containing missing values (`geom_point()`).
## Warning: Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
correlation_analysis(ZOO_Corr_Go_Neut_CWS, ZOO_Corr_Go_Neut_CWNS, "MeanAmp_N2P2", "workingMemory")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 1 rows containing missing values (`geom_point()`).
## Removed 1 rows containing missing values (`geom_point()`).
## Warning: Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
correlation_analysis(ZOO_Corr_Go_Neut_CWS, ZOO_Corr_Go_Neut_CWNS, "MeanAmp_N2P2", "BehavioralRegulationIndex_BRI")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 1 rows containing missing values (`geom_point()`).
## Removed 1 rows containing missing values (`geom_point()`).
## Warning: Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
correlation_analysis(ZOO_Corr_Go_Neut_CWS, ZOO_Corr_Go_Neut_CWNS, "MeanAmp_N2P2", "MetacognitionIndex_MI")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 1 rows containing missing values (`geom_point()`).
## Removed 1 rows containing missing values (`geom_point()`).
## Warning: Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
correlation_analysis(ZOO_Corr_Go_Neut_CWS, ZOO_Corr_Go_Neut_CWNS, "MeanAmp_N2P2", "GlobalExecutiveComposite_GEC")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 1 rows containing missing values (`geom_point()`).
## Removed 1 rows containing missing values (`geom_point()`).
## Warning: Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
## Removed 1 rows containing missing values (`geom_text()`).
# REGRESSION WITH KNOTS
# install.packages("segmented")
library(segmented)
## Loading required package: MASS
##
## Attaching package: 'MASS'
## The following object is masked from 'package:dplyr':
##
## select
## Loading required package: nlme
##
## Attaching package: 'nlme'
## The following object is masked from 'package:lme4':
##
## lmList
## The following object is masked from 'package:dplyr':
##
## collapse
# Assuming df1 is your data frame, and var1 and var2 are the variables of interest
# Fit a segmented linear model with two knots
model <- lm(GlobalExecutiveComposite_GEC ~ MeanAmp_N2P2_DIFF, data = ZOO_Corr_DIFF_Neg_CWS)
# Specify the breakpoints (knots)
breakpoints <- segmented(model, seg.Z = ~MeanAmp_N2P2_DIFF, psi = list(MeanAmp_N2P2_DIFF = c(-4, 2)))
# Print the summary of the segmented model
summary(breakpoints)
##
## ***Regression Model with Segmented Relationship(s)***
##
## Call:
## segmented.lm(obj = model, seg.Z = ~MeanAmp_N2P2_DIFF, psi = list(MeanAmp_N2P2_DIFF = c(-4,
## 2)))
##
## Estimated Break-Point(s):
## Est. St.Err
## psi1.MeanAmp_N2P2_DIFF -7.370 25.077
## psi2.MeanAmp_N2P2_DIFF -1.098 1.623
##
## Coefficients of the linear terms:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 257.410 3583.380 0.072 0.943
## MeanAmp_N2P2_DIFF 18.026 407.762 0.044 0.965
## U1.MeanAmp_N2P2_DIFF -23.072 407.784 -0.057 NA
## U2.MeanAmp_N2P2_DIFF 10.398 4.619 2.251 NA
##
## Residual standard error: 21.5 on 30 degrees of freedom
## Multiple R-Squared: 0.2526, Adjusted R-squared: 0.1281
##
## Boot restarting based on 7 samples. Last fit:
## Convergence attained in 3 iterations (rel. change 5.4457e-07)
# correlation_CWS <- cor.test(predict(breakpoints), ZOO_Corr_DIFF_Neg_CWS$GlobalExecutiveComposite_GEC, use = "complete.obs")
# Create a data frame for prediction
pred_data <- data.frame(MeanAmp_N2P2_DIFF = seq(min(ZOO_Corr_DIFF_Neg_CWS$MeanAmp_N2P2_DIFF), max(ZOO_Corr_DIFF_Neg_CWS$MeanAmp_N2P2_DIFF), length.out = 100))
# Predict the values using the segmented model
pred_values <- predict(breakpoints, newdata = pred_data)
# lwr <- pred_values[, "lwr"]
# upr <- pred_values[, "upr"]
# Create a scatter plot with regression line and confidence interval
ggplot(ZOO_Corr_DIFF_Neg_CWS, aes(x = MeanAmp_N2P2_DIFF, y = GlobalExecutiveComposite_GEC)) +
geom_point() +
geom_line(data = pred_data, aes(x = MeanAmp_N2P2_DIFF, y = pred_values), color = "blue") +
# geom_ribbon(data = pred_data, aes(x = MeanAmp_N2P2_DIFF, ymin = lwr, ymax = upr),
# fill = "blue", alpha = 0.3, linetype = "dashed") +
labs(title = "Scatter Plot with Regression Line and Confidence Interval",
x = "Variable 1",
y = "Variable 2") +
theme_minimal()
## Warning: Removed 1 rows containing missing values (`geom_point()`).